id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
195421 | #!/usr/bin/env python
#The MIT License (MIT)
#Copyright (c) 2016 <NAME>
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
#CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import cv2
import sys
class DiffMotionDetector:
    """Detect motion as the absolute difference between a static
    background frame and the current (foreground) frame.

    The background is stored internally as a greyscale image.  The
    mask returned by returnMask() is the thresholded absolute
    difference between the stored background and the frame to check.
    """

    def __init__(self):
        """Init the detector with no background set."""
        self.background_gray = None

    def setBackground(self, frame):
        """Set the BGR frame used as static background.

        The frame is converted to greyscale and stored internally.
        @param frame the background frame; ignored when None
        """
        if frame is None:
            return None
        self.background_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    def getBackground(self):
        """Return the stored background as a BGR image, or None when
        no background has been set.
        """
        if self.background_gray is None:
            return None
        return cv2.cvtColor(self.background_gray, cv2.COLOR_GRAY2BGR)

    def returnMask(self, foreground_image, threshold=25):
        """Return the binary motion mask for the given frame.

        @param foreground_image the BGR frame to check
        @param threshold the value used for filtering the pixels after the absdiff
        @return a binary (0/255) mask, or None when the input frame
            or the background is missing
        """
        if foreground_image is None:
            return None
        # BUG FIX: previously absdiff was called even when no background
        # had been set, which raised inside OpenCV; bail out instead.
        if self.background_gray is None:
            return None
        foreground_gray = cv2.cvtColor(foreground_image, cv2.COLOR_BGR2GRAY)
        delta_image = cv2.absdiff(self.background_gray, foreground_gray)
        threshold_image = cv2.threshold(delta_image, threshold, 255,
                                        cv2.THRESH_BINARY)[1]
        return threshold_image
class MogMotionDetector:
    """Motion detector based on an adaptive Mixture-of-Gaussians (MOG)
    background model.

    Implements the approach of KaewTraKulPong and Bowden, "An Improved
    Adaptive Background Mixture Model for Real-time Tracking with
    Shadow Detection": every pixel is modelled by a small mixture of
    Gaussians that is updated online, allowing fast learning at start-up
    and adaptation to changing environments.
    """

    def __init__(self, history=10, numberMixtures=3, backgroundRatio=0.6, noise=20):
        """Create the underlying OpenCV MOG background subtractor.

        @param history length of the frame history
        @param numberMixtures maximum number of Gaussian mixture
            components per pixel; typically a small number from 3 to 5
        @param backgroundRatio minimum fraction of the mixture a
            component must account for to be treated as background
            (i.e. the minimum prior probability of the background)
        @param noise noise strength
        """
        # NOTE(review): cv2.BackgroundSubtractorMOG is the OpenCV 2.x
        # constructor name — confirm the cv2 version this project targets.
        self.BackgroundSubtractorMOG = cv2.BackgroundSubtractorMOG(
            history, numberMixtures, backgroundRatio, noise)

    def returnMask(self, foreground_image):
        """Return the binary foreground mask for the given frame.

        @param foreground_image the frame to check
        """
        return self.BackgroundSubtractorMOG.apply(foreground_image)
class Mog2MotionDetector:
    """Motion detector based on the improved adaptive Gaussian-mixture
    background model (MOG2).

    The underlying algorithm updates the mixture parameters with
    recursive equations and simultaneously selects the appropriate
    number of components for each pixel.
    """

    def __init__(self):
        """Create the underlying OpenCV MOG2 background subtractor."""
        self.BackgroundSubtractorMOG2 = cv2.BackgroundSubtractorMOG2()

    def returnMask(self, foreground_image):
        """Return a strictly binary foreground mask for the frame.

        MOG2 marks shadow pixels with the value 127; thresholding at
        126 filters them out so the result is a pure 0/255 mask.
        @param foreground_image the frame to check
        """
        raw_mask = self.BackgroundSubtractorMOG2.apply(foreground_image)
        _, binary_mask = cv2.threshold(raw_mask, 126, 255, cv2.THRESH_BINARY)
        return binary_mask

    def returnGreyscaleMask(self, foreground_image):
        """Return the raw detector output, including shadow pixels.

        Unlike returnMask() this is not a classic binary mask: pixels
        classified as shadow keep the value 127.
        @param foreground_image the frame to check
        """
        return self.BackgroundSubtractorMOG2.apply(foreground_image)
| StarcoderdataPython |
4842491 | import logging
from huobi.connection.impl.websocket_watchdog import WebSocketWatchDog
from huobi.connection.impl.websocket_manage import WebsocketManage
from huobi.connection.impl.websocket_request import WebsocketRequest
from huobi.constant.system import WebSocketDefine, ApiVersion
class SubscribeClient(object):
    """Client that opens websocket subscriptions to the Huobi server.

    Each subscription gets its own WebsocketManage connection; a single
    class-level watchdog monitors every connection that is created.
    """

    # static property: one watchdog shared by all client instances
    subscribe_watch_dog = WebSocketWatchDog()

    def __init__(self, **kwargs):
        """
        Create the subscription client to subscribe the update from server.

        :param kwargs: The option of subscription connection.
            api_key: The public key applied from Huobi.
            secret_key: The private key applied from Huobi.
            url: Set the URI for subscription.
            init_log: to init logger
        """
        self.__api_key = kwargs.get("api_key", None)
        self.__secret_key = kwargs.get("secret_key", None)
        self.__uri = kwargs.get("url", WebSocketDefine.Uri)
        self.__init_log = kwargs.get("init_log", None)
        # BUG FIX: the original condition was
        # `if self.__init_log and self.__init_log:` — the same value
        # tested twice; a single truthiness test is what was intended.
        if self.__init_log:
            logger = logging.getLogger("huobi-client")
            logger.setLevel(level=logging.INFO)
            handler = logging.StreamHandler()
            handler.setFormatter(
                logging.Formatter(
                    "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
                )
            )
            # NOTE(review): a new handler is attached on every client
            # construction with init_log set, so repeated instantiation
            # duplicates log lines — consider guarding on logger.handlers.
            logger.addHandler(handler)
        self.__websocket_manage_list = list()

    def __create_websocket_manage(self, request):
        """Open a connection for *request*, track it and register it
        with the shared watchdog."""
        manager = WebsocketManage(
            self.__api_key, self.__secret_key, self.__uri, request
        )
        self.__websocket_manage_list.append(manager)
        manager.connect()
        SubscribeClient.subscribe_watch_dog.on_connection_created(manager)

    def create_request(
        self,
        subscription_handler,
        parse,
        callback,
        error_handler,
        is_trade,
        is_mbp_feed=False,
    ):
        """Build a WebsocketRequest from the given handlers.

        :param subscription_handler: callable that sends the subscribe message
        :param parse: parser for incoming JSON payloads
        :param callback: invoked with each parsed update
        :param error_handler: invoked on connection/parse errors
        :param is_trade: whether this is an (authenticated) trading channel
        :param is_mbp_feed: whether this is a market-by-price feed
        """
        request = WebsocketRequest()
        request.subscription_handler = subscription_handler
        request.is_trading = is_trade
        request.is_mbp_feed = is_mbp_feed
        request.auto_close = (
            False  # subscribe need connection. websocket request need close request.
        )
        request.json_parser = parse
        request.update_callback = callback
        request.error_handler = error_handler
        return request

    def create_request_v1(
        self, subscription_handler, parse, callback, error_handler, is_trade=False
    ):
        """Build a request bound to API version v1."""
        request = self.create_request(
            subscription_handler=subscription_handler,
            parse=parse,
            callback=callback,
            error_handler=error_handler,
            is_trade=is_trade,
        )
        request.api_version = ApiVersion.VERSION_V1
        return request

    def create_request_v2(
        self, subscription_handler, parse, callback, error_handler, is_trade=False
    ):
        """Build a request bound to API version v2."""
        request = self.create_request(
            subscription_handler=subscription_handler,
            parse=parse,
            callback=callback,
            error_handler=error_handler,
            is_trade=is_trade,
        )
        request.api_version = ApiVersion.VERSION_V2
        return request

    def execute_subscribe_v1(
        self, subscription_handler, parse, callback, error_handler, is_trade=False
    ):
        """Create a v1 request and open its websocket connection."""
        request = self.create_request_v1(
            subscription_handler, parse, callback, error_handler, is_trade
        )
        self.__create_websocket_manage(request)

    def execute_subscribe_v2(
        self, subscription_handler, parse, callback, error_handler, is_trade=False
    ):
        """Create a v2 request and open its websocket connection."""
        request = self.create_request_v2(
            subscription_handler, parse, callback, error_handler, is_trade
        )
        self.__create_websocket_manage(request)

    def execute_subscribe_mbp(
        self,
        subscription_handler,
        parse,
        callback,
        error_handler,
        is_trade=False,
        is_mbp_feed=True,
    ):
        """Create a market-by-price request and open its connection."""
        request = self.create_request(
            subscription_handler, parse, callback, error_handler, is_trade, is_mbp_feed
        )
        self.__create_websocket_manage(request)

    def unsubscribe_all(self):
        """Close every open connection and forget it (watchdog included)."""
        for websocket_manage in self.__websocket_manage_list:
            SubscribeClient.subscribe_watch_dog.on_connection_closed(websocket_manage)
            websocket_manage.close()
        self.__websocket_manage_list.clear()
| StarcoderdataPython |
3324293 | <gh_stars>0
# -*- encoding: utf-8 -*-
"""
Script para realizar a coleta dos tweets.
"""
import argparse
from watching.tweetcollector import TweetCollector
# Command-line interface for the collector: by default it runs one
# search; with --run-forever it keeps polling every --interval seconds.
# Help texts are in Portuguese (the project's language) and are kept
# verbatim since they are user-facing runtime strings.
parser = argparse.ArgumentParser(description='Coletor de tweets.')
parser.add_argument('--run-forever', action='store_true', help='O programa não é finalizado neste modo e as buscas são'
                                                               ' realizadas automaticamente em intervalos. Use'
                                                               ' "--interval" se desejar especificar o intervalo entre'
                                                               ' as buscas.')
parser.add_argument('--interval', type=int, default=30, help='Intervalo em segundo entre as buscas.')
args = parser.parse_args()
def main():
    """Entry point: build a TweetCollector from the parsed command-line
    arguments and start collecting."""
    tweet_collector = TweetCollector(run_forever=args.run_forever,
                                     interval=args.interval)
    tweet_collector.start()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1625644 | from flask import render_template,redirect,url_for,flash,request
from ..models import User, Subscriber, Post
from .forms import LoginForm, RegistrationForm, SubscriberForm
from .. import db
from . import auth
from ..email import mail_message
from flask_login import login_user,logout_user,login_required, current_user
@auth.route('/register', methods=["GET", "POST"])
def register():
    """Render the signup form and create a new user account.

    On a valid POST the user is stored, a welcome email is sent and
    the browser is redirected to the login page; otherwise the
    registration form is rendered (again).
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(email=form.email.data, username=form.username.data,
                    password=form.password.data)
        db.session.add(user)
        db.session.commit()
        mail_message("Welcome to Blogtech", "email/welcome_user",
                     user.email, user=user)
        return redirect(url_for('auth.login'))
    # FIX: removed the dead `title = "New Account"` local — it was never
    # passed to the template nor used anywhere else.
    return render_template('auth/register.html', registration_form=form)
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login form and authenticate the user.

    On a valid POST the submitted password is checked against the
    stored hash; on success the user session is opened and the browser
    is redirected to the originally requested page (``next``) or the
    index.  On failure a flash message is shown and the form rendered
    again.
    """
    login_form = LoginForm()
    if login_form.validate_on_submit():
        user = User.query.filter_by(email=login_form.email.data).first()
        # verify_password compares against the stored hash (see User model)
        if user is not None and user.verify_password(login_form.password.data):
            login_user(user, login_form.remember.data)
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or Password')
    title = "Scribble login"
    return render_template('auth/login.html', login_form=login_form, title=title)
@auth.route('/logout')
@login_required
def logout():
    """Log the current user out and redirect to the index page.

    Requires an authenticated session (login_required).
    """
    logout_user()
    return redirect(url_for("main.index"))
@auth.route('/subscribe', methods=['GET', 'POST'])
def subscriber():
    """Handle newsletter subscription.

    On a valid POST the subscriber is stored, a welcome email is sent
    and the browser is redirected to the blog page; otherwise the
    subscription form is rendered.
    """
    subscriber_form = SubscriberForm()
    if subscriber_form.validate_on_submit():
        # FIX: removed two dead queries — an ordered Post query whose
        # result was immediately overwritten, and a Post.query.all()
        # mistakenly bound to the name `subscriber`.
        blogs = Post.query.all()
        subscriber = Subscriber(email=subscriber_form.email.data,
                                name=subscriber_form.name.data)
        db.session.add(subscriber)
        db.session.commit()
        mail_message("Welcome to BlogTech.com", "email/welcome_subscriber",
                     subscriber.email, subscriber=subscriber)
        title = "BlogTech.com"
        return redirect(url_for('main.blog', title=title, blogs=blogs,
                                subscriber_form=subscriber_form))
    return render_template('subscribe.html', subscriber_form=subscriber_form)
1716364 | <filename>src/bq_test_kit/data_literal_transformers/__init__.py
# Copyright (c) 2020 <NAME>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
# C0114 disabled because this module contains only export
# pylint: disable=C0114
from bq_test_kit.data_literal_transformers.base_data_literal_transformer import \
BaseDataLiteralTransformer
from bq_test_kit.data_literal_transformers.dsv_data_literal_transformer import \
DsvDataLiteralTransformer
from bq_test_kit.data_literal_transformers.json_data_literal_transformer import \
JsonDataLiteralTransformer
__all__ = [
"BaseDataLiteralTransformer",
"DsvDataLiteralTransformer",
"JsonDataLiteralTransformer"
]
| StarcoderdataPython |
1683747 | import core
import proto.framework_pb2 as framework_pb2
from framework import OpProtoHolder, Variable, Program, Operator
from initializer import Constant, Normal, Xavier, Initializer
from paddle.v2.fluid.layer_helper import LayerHelper, unique_name
import re
import cStringIO
from param_attr import ParamAttr
import contextlib
__all__ = [
'fc', 'data', 'cross_entropy', 'conv2d', 'pool2d', 'embedding', 'concat',
'StaticRNN', 'cast', 'sequence_conv', 'sequence_pool', 'sums', 'cos_sim',
'batch_norm', 'accuracy', 'split_lod_tensor', 'While'
]
def fc(input,
       size,
       num_flatten_dims=1,
       param_attr=None,
       bias_attr=None,
       act=None,
       name=None,
       main_program=None,
       startup_program=None):
    """
    Fully Connected Layer: out = act(input * W + b).

    Args:
        input: The input tensor(s) to the function; may be one variable
            or a list of variables (one mul op is created per input).
        size: The output size of the layer.
        num_flatten_dims: Number of leading dims kept; all trailing dims
            are flattened into the matmul's column dimension.
        param_attr: The parameters/weights to the FC Layer.
        bias_attr: The bias parameter for the FC layer.
        act: Activation to be applied to the output of FC layer.
        name: Name/alias of the function.
        main_program: Name of the main program that calls this.
        startup_program: Name of the startup program.

    This function can take in multiple inputs and performs the Fully
    Connected function (linear transformation) on top of each of them.
    The per-input products are summed, the bias is added, and the
    activation (if any) is applied on top.
    All the input variables of this function are passed in as local
    variables to the LayerHelper constructor.
    """
    helper = LayerHelper('fc', **locals())

    dtype = helper.input_dtype()

    mul_results = []
    # NOTE: the loop variable deliberately shadows the `param_attr`
    # argument — LayerHelper pairs each input with its own attribute.
    for input_var, param_attr in helper.iter_inputs_and_params():
        input_shape = input_var.shape
        # Flatten all dims from num_flatten_dims onward into the weight's
        # first dimension (py2 builtin `reduce`).
        param_shape = [
            reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1)
        ] + [size]
        w = helper.create_parameter(
            attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False)
        tmp = helper.create_tmp_variable(dtype)
        helper.append_op(
            type="mul",
            inputs={
                "X": input_var,
                "Y": w,
            },
            outputs={"Out": tmp},
            attrs={'x_num_col_dims': num_flatten_dims,
                   'y_num_col_dims': 1})
        mul_results.append(tmp)

    # sum: a single input needs no sum op
    if len(mul_results) == 1:
        pre_bias = mul_results[0]
    else:
        pre_bias = helper.create_tmp_variable(dtype)
        helper.append_op(
            type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias})
    # add bias
    pre_activation = helper.append_bias_op(pre_bias)
    # add activation
    return helper.append_activation(pre_activation)
def embedding(input,
              size,
              is_sparse=False,
              param_attr=None,
              dtype='float32',
              main_program=None,
              startup_program=None):
    """
    Embedding Layer.

    Performs a lookup_table op: each ID in *input* is mapped to its
    row of a learned embedding table.

    Args:
        input: Variable holding the IDs to look up.
        size: Shape of the embedding table (e.g. [vocab_size, emb_dim]).
        is_sparse: A flag that declares whether the gradient of the
            table should be handled as sparse.
        param_attr: Parameter attributes for the embedding table.
        dtype: The type of data: float32, float_16, int etc.
        main_program: Name of the main program that calls this.
        startup_program: Name of the startup program.

    All the input variables of this function are passed in as local
    variables to the LayerHelper constructor.
    """
    helper = LayerHelper('embedding', **locals())
    # The embedding table itself is a trainable parameter.
    w = helper.create_parameter(
        attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False)
    tmp = helper.create_tmp_variable(dtype)
    helper.append_op(
        type='lookup_table',
        inputs={'Ids': input,
                'W': w},
        outputs={'Out': tmp},
        attrs={'is_sparse': is_sparse})
    return tmp
# TODO(qijun): expose H0 and C0
def dynamic_lstm(input,
                 size,
                 param_attr=None,
                 bias_attr=None,
                 use_peepholes=True,
                 is_reverse=False,
                 gate_activation='sigmoid',
                 cell_activation='tanh',
                 candidate_activation='tanh',
                 dtype='float32',
                 main_program=None,
                 startup_program=None):
    """LSTM layer over a sequence input.

    Creates the packed weight ([size/4, size]) and bias parameters and
    appends a single `lstm` op.

    Args:
        input: The sequence input to the LSTM.
        size: Four times the hidden size (divided by 4 below because
            the four gates share one packed weight matrix).
        use_peepholes: Whether to add peephole connections; these need
            3 extra bias blocks (bias is [1, 7*size] instead of [1, 4*size]).
        is_reverse: Whether to process the sequence in reverse order.
        gate_activation / cell_activation / candidate_activation:
            Names of the activations forwarded to the op.

    Returns:
        (hidden, cell): the hidden-state and cell-state variables.
    """
    helper = LayerHelper('lstm', **locals())
    size = size / 4  # packed weights hold 4 gate blocks per hidden unit
    weight = helper.create_parameter(
        attr=helper.param_attr, shape=[size, 4 * size], dtype=dtype)
    bias_size = [1, 7 * size]
    # Without peepholes only the 4 gate biases are needed.
    if not use_peepholes:
        bias_size[1] = 4 * size
    bias = helper.create_parameter(
        attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True)

    hidden = helper.create_tmp_variable(dtype)
    cell = helper.create_tmp_variable(dtype)
    batch_gate = helper.create_tmp_variable(dtype)
    batch_cell_pre_act = helper.create_tmp_variable(dtype)

    helper.append_op(
        type='lstm',
        inputs={'Input': input,
                'Weight': weight,
                'Bias': bias},
        outputs={
            'Hidden': hidden,
            'Cell': cell,
            'BatchGate': batch_gate,
            'BatchCellPreAct': batch_cell_pre_act
        },
        attrs={
            'use_peepholes': use_peepholes,
            'is_reverse': is_reverse,
            'gate_activation': gate_activation,
            'cell_activation': cell_activation,
            'candidate_activation': candidate_activation
        })
    return hidden, cell
def gru_unit(input,
             hidden,
             size,
             weight=None,
             bias=None,
             activation='tanh',
             gate_activation='sigmoid',
             main_program=None,
             startup_program=None):
    """
    GRUUnit Operator implements partial calculations of the GRU unit as following:

    $$
    update \ gate: u_t = actGate(xu_t + W_u * h_{t-1} + b_u) \\
    reset \ gate: r_t = actGate(xr_t + W_r * h_{t-1} + b_r) \\
    output \ candidate: {h}_t = actNode(xc_t + W_c * dot(r_t, h_{t-1}) + b_c) \\
    output: h_t = dot((1 - u_t), h_{t-1}) + dot(u_t, {h}_t)
    $$

    which is same as one time step of GRU Operator.

    @note To implement the complete GRU unit, fully-connected operator must be
    used before to feed xu, xr and xc as the Input of GRUUnit operator.

    @param activation name of the candidate activation; one of
        'identity', 'sigmoid', 'tanh', 'relu'
    @param gate_activation name of the gate activation (same choices)
    """
    # Map the activation names to the integer codes the op expects.
    activation_dict = dict(
        identity=0,
        sigmoid=1,
        tanh=2,
        relu=3, )
    activation = activation_dict[activation]
    gate_activation = activation_dict[gate_activation]

    helper = LayerHelper('gru_unit', **locals())
    dtype = helper.input_dtype()
    size = size / 3  # the packed weight holds 3 gate blocks per hidden unit

    # create weight
    if weight is None:
        weight = helper.create_parameter(
            attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype)

    # create bias
    if bias is None:
        bias_size = [1, 3 * size]
        bias = helper.create_parameter(
            attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True)

    gate = helper.create_tmp_variable(dtype)
    reset_hidden_pre = helper.create_tmp_variable(dtype)
    updated_hidden = helper.create_tmp_variable(dtype)

    helper.append_op(
        type='gru_unit',
        inputs={'Input': input,
                'HiddenPrev': hidden,
                'Weight': weight},
        outputs={
            'Gate': gate,
            'ResetHiddenPrev': reset_hidden_pre,
            'Hidden': updated_hidden,
        },
        # BUG FIX: the translated activation codes were computed above
        # but then ignored — the attrs were hard-coded to 0 (identity)
        # and 1 (sigmoid).  Pass the values the caller actually asked for.
        attrs={
            'activation': activation,
            'gate_activation': gate_activation,
        })

    return updated_hidden, reset_hidden_pre, gate
def data(name,
         shape,
         append_batch_size=True,
         dtype='float32',
         lod_level=0,
         type=core.VarDesc.VarType.LOD_TENSOR,
         main_program=None,
         startup_program=None,
         stop_gradient=True):
    """
    Data Layer: declare a global input variable of the network.

    Args:
        name: The name/alias of the variable.
        shape: Tuple declaring the shape; a None entry means unknown.
        append_batch_size: Whether to prepend a -1 batch dimension.
        dtype: The type of data: float32, float_16, int etc.
        lod_level(int): The LoD Level. 0 means the input is not a sequence.
        type: The output type. By default it is LOD_TENSOR.
        main_program: Name of the main program that calls this.
        startup_program: Name of the startup program.
        stop_gradient: Whether gradient flow stops at this variable.

    The created global variable can be accessed by all following
    operations and layers in the graph.
    """
    helper = LayerHelper('data', **locals())
    shape = list(shape)
    # A None dimension means "unknown"; encode it as -1.  Any unknown or
    # negative dimension makes prepending the batch dimension redundant.
    for axis, dim in enumerate(shape):
        if dim is None:
            shape[axis] = -1
            append_batch_size = False
        elif dim < 0:
            append_batch_size = False

    if append_batch_size:
        shape = [-1] + shape  # append batch size as -1

    return helper.create_global_variable(
        name=name,
        shape=shape,
        dtype=dtype,
        type=type,
        stop_gradient=stop_gradient,
        lod_level=lod_level)
def create_tensor(dtype, name=None, main_program=None, startup_program=None):
    """Create and return a variable of the given dtype."""
    layer_helper = LayerHelper("create_tensor", **locals())
    return layer_helper.create_variable(name=layer_helper.name, dtype=dtype)
def _convert_(name):
"""
Formatting.
Args:
name: The name/alias
This function takes in a name and converts it to a standard format of
group1_group2. Where as per the regular expression, group1 can have
alphabets and numbers and group2 has capital alphabets.
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def _generate_doc_string_(op_proto):
    """
    Generate docstring by OpProto

    Args:
        op_proto (framework_pb2.OpProto): a protobuf message typed OpProto

    Returns:
        str: the document string

    Raises:
        TypeError: if op_proto is not a framework_pb2.OpProto message.
    """

    def _type_to_str_(tp):
        # Human-readable name of a protobuf AttrType enum value.
        return framework_pb2.AttrType.Name(tp)

    if not isinstance(op_proto, framework_pb2.OpProto):
        raise TypeError("OpProto should be `framework_pb2.OpProto`")

    # Build the docstring incrementally (py2 cStringIO buffer).
    buf = cStringIO.StringIO()
    buf.write(op_proto.comment)
    buf.write('\nArgs:\n')
    for each_input in op_proto.inputs:
        line_begin = ' {0}: '.format(_convert_(each_input.name))
        buf.write(line_begin)
        buf.write(each_input.comment)
        buf.write('\n')
        # Align the flags line under the argument description.
        buf.write(' ' * len(line_begin))
        buf.write('Duplicable: ')
        buf.write(str(each_input.duplicable))
        buf.write(' Optional: ')
        buf.write(str(each_input.dispensable))
        buf.write('\n')

    for each_attr in op_proto.attrs:
        buf.write(' ')
        buf.write(each_attr.name)
        buf.write(' (')
        buf.write(_type_to_str_(each_attr.type))
        buf.write('): ')
        buf.write(each_attr.comment)
        buf.write('\n')

    if len(op_proto.outputs) != 0:
        buf.write('\nReturns:\n')
        buf.write(' ')
        for each_opt in op_proto.outputs:
            # Document only the first non-intermediate output.
            if not each_opt.intermediate:
                break
        buf.write(each_opt.comment)

    return buf.getvalue()
def _create_op_func_(op_type):
    """
    Create an Operator for a Function.

    Args:
        op_type: The name of the operator to be created

    This function takes in the operator type (sigmoid, mean , average etc) and
    creates the operator functionality.  The generated layer function is
    installed into this module's globals() and appended to __all__.
    """
    op_proto = OpProtoHolder.instance().get_op_proto(op_type)
    # Partition outputs: exactly one non-intermediate output is supported.
    not_intermediate_outputs = \
        filter(lambda output: not output.intermediate, op_proto.outputs)
    intermediate_outputs = \
        filter(lambda output: output.intermediate, op_proto.outputs)

    if len(not_intermediate_outputs) != 1:
        raise ValueError("Only one non intermediate output operator can be",
                         "automatically generated")

    if not_intermediate_outputs[0].duplicable:
        raise ValueError(
            "Only non duplicable op can be automatically generated")

    for output in intermediate_outputs:
        if output.duplicable:
            raise ValueError("The op can be automatically generated only when ",
                             "all intermediate ops are not duplicable")

    o_name = not_intermediate_outputs[0].name
    intermediate_output_names = [output.name for output in intermediate_outputs]

    def infer_and_check_dtype(op_proto, **kwargs):
        """
        This function performs the sanity check for dtype and
        instance type: every input must be a Variable and all inputs
        must share one dtype, which is returned.
        """
        dtype = None
        for ipt in op_proto.inputs:
            name = _convert_(ipt.name)
            val = kwargs.pop(name, [])
            if not isinstance(val, list) and not isinstance(val, tuple):
                val = [val]
            for each in val:
                if not isinstance(each, Variable):
                    raise ValueError("input of {0} must be variable".format(
                        op_type))

                if dtype is None:
                    dtype = each.dtype
                elif dtype != each.dtype:
                    raise ValueError(
                        "operator {0} must input same dtype. {1} vs {2}".format(
                            op_type, dtype, each.dtype))

        return dtype

    def func(**kwargs):
        # The generated layer function: collect inputs by proto name,
        # create the output variables, append the op; whatever kwargs
        # remain after popping the inputs become op attributes.
        helper = LayerHelper(op_type, **kwargs)

        dtype = infer_and_check_dtype(op_proto, **kwargs)

        inputs = dict()
        for ipt in op_proto.inputs:
            name = _convert_(ipt.name)
            val = kwargs.pop(name, [])
            if not isinstance(val, list) and not isinstance(val, tuple):
                val = [val]
            inputs[ipt.name] = val

        outputs = dict()
        out = helper.create_tmp_variable(dtype=dtype)
        outputs[o_name] = [out]
        for name in intermediate_output_names:
            outputs[name] = [helper.create_tmp_variable(dtype=dtype)]
        helper.append_op(
            type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs)
        return helper.append_activation(out)

    # Register the generated function under the op's name.
    func.__name__ = op_type
    globals()[op_type] = func
    func.__doc__ = _generate_doc_string_(op_proto)
    global __all__
    __all__.append(op_type)
# Auto-generate the simple layer functions from their op definitions.
# FIX: 'reshape' was registered twice, which re-generated the function
# and appended a duplicate entry to __all__; the duplicate is removed.
_create_op_func_('mean')
_create_op_func_('mul')
_create_op_func_('elementwise_add')
_create_op_func_('elementwise_div')
_create_op_func_('dropout')
_create_op_func_('reshape')
_create_op_func_('sigmoid')
_create_op_func_('scale')
_create_op_func_('transpose')
_create_op_func_('sigmoid_cross_entropy_with_logits')
def cast(x, dtype, main_program=None):
    """Cast the variable *x* to the target *dtype* and return the
    result as a new temporary variable."""
    helper = LayerHelper('cast', **locals())
    casted = helper.create_tmp_variable(dtype=dtype)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [casted]},
        attrs={'in_dtype': x.dtype,
               'out_dtype': casted.dtype})
    return casted
def concat(input, axis, main_program=None, startup_program=None):
    """Concatenate the variables in *input* along *axis* and return
    the resulting variable."""
    helper = LayerHelper('concat', **locals())
    joined = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(
        type='concat',
        inputs={'X': input},
        outputs={'Out': [joined]},
        attrs={'axis': axis})
    return joined
def sums(input, out=None, main_program=None, startup_program=None):
    """Element-wise sum of all the variables in *input*.

    A temporary output variable is created when *out* is not given.
    """
    helper = LayerHelper('sum', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype()) if out is None else out
    helper.append_op(type='sum', inputs={'X': input}, outputs={'Out': out})
    return out
def linear_chain_crf(input,
                     label,
                     param_attr=None,
                     main_program=None,
                     startup_program=None):
    """Append a linear_chain_crf op over the emission scores *input*
    and the tag sequence *label*, returning the log-likelihood variable.

    Creates a trainable transition parameter of shape
    [size + 2, size] where size is the tag count (input.shape[1]);
    the two extra rows presumably hold start/end transition weights —
    TODO confirm against the op definition.
    """
    helper = LayerHelper('linear_chain_crf', **locals())
    size = input.shape[1]
    transition = helper.create_parameter(
        attr=helper.param_attr,
        shape=[size + 2, size],
        dtype=helper.input_dtype())
    # Intermediate outputs required by the op (used by its gradient).
    alpha = helper.create_tmp_variable(dtype=helper.input_dtype())
    emission_exps = helper.create_tmp_variable(dtype=helper.input_dtype())
    transition_exps = helper.create_tmp_variable(dtype=helper.input_dtype())
    log_likelihood = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(
        type='linear_chain_crf',
        inputs={"Emission": [input],
                "Transition": transition,
                "Label": label},
        outputs={
            "Alpha": [alpha],
            "EmissionExps": [emission_exps],
            "TransitionExps": transition_exps,
            "LogLikelihood": log_likelihood
        })

    return log_likelihood
def crf_decoding(input,
                 param_attr,
                 label=None,
                 main_program=None,
                 startup_program=None):
    """Append a crf_decoding op that decodes the best tag path from
    the emission scores *input* using the transition parameter named
    by *param_attr* (shared with linear_chain_crf).

    @param label optional ground-truth labels forwarded to the op
    @return the Viterbi path variable
    """
    helper = LayerHelper('crf_decoding', **locals())
    # Reuse the transition weights created by linear_chain_crf.
    transition = helper.get_parameter(param_attr.name)
    viterbi_path = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(
        type='crf_decoding',
        inputs={"Emission": [input],
                "Transition": transition,
                "Label": label},
        outputs={"ViterbiPath": [viterbi_path]})

    return viterbi_path
def assign(input, output, main_program=None, startup_program=None):
    """Copy *input* into *output* and return *output*.

    Implemented by appending a `scale` op with a scale factor of 1.0.
    """
    helper = LayerHelper('assign', **locals())
    helper.append_op(
        type='scale',
        inputs={'X': [input]},
        outputs={'Out': [output]},
        attrs={'scale': 1.0})
    return output
def split_lod_tensor(input,
                     mask,
                     level=0,
                     main_program=None,
                     startup_program=None):
    """Split *input* into two LoD tensors according to the boolean
    *mask* at the given LoD *level*: entries whose mask is true go to
    the first result, the remainder to the second.
    """
    helper = LayerHelper('split_lod_tensor', **locals())
    true_branch = helper.create_tmp_variable(dtype=input.dtype)
    false_branch = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='split_lod_tensor',
        inputs={'X': input,
                'Mask': mask},
        outputs={'OutTrue': true_branch,
                 'OutFalse': false_branch},
        attrs={'level': level})
    return true_branch, false_branch
def merge_lod_tensor(in_true,
                     in_false,
                     x,
                     mask,
                     level=0,
                     main_program=None,
                     startup_program=None):
    """Inverse of split_lod_tensor: merge *in_true* and *in_false*
    back into a single LoD tensor, guided by the boolean *mask* at
    the given LoD *level*, with *x* providing the original layout.
    """
    helper = LayerHelper('merge_lod_tensor', **locals())
    out = helper.create_tmp_variable(dtype=in_true.dtype)
    helper.append_op(
        type='merge_lod_tensor',
        inputs={'X': x,
                'Mask': mask,
                'InTrue': in_true,
                'InFalse': in_false},
        outputs={'Out': out},
        attrs={'level': level})
    return out
def cos_sim(X, Y, **kwargs):
    """Compute the cosine similarity between the tensors X and Y and
    return the similarity variable."""
    helper = LayerHelper('cos_sim', **kwargs)
    similarity = helper.create_tmp_variable(dtype=X.dtype)
    x_norm = helper.create_tmp_variable(dtype=X.dtype)
    y_norm = helper.create_tmp_variable(dtype=X.dtype)
    helper.append_op(
        type='cos_sim',
        inputs={'X': [X],
                'Y': [Y]},
        outputs={'Out': [similarity],
                 'XNorm': [x_norm],
                 'YNorm': [y_norm]})
    return similarity
def cross_entropy(input, label, **kwargs):
    """Append a cross_entropy op over the predictions *input* and the
    ground truth *label*; remaining kwargs become op attributes.
    Returns the loss variable."""
    helper = LayerHelper('cross_entropy', **kwargs)
    loss = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='cross_entropy',
        inputs={'X': [input],
                'Label': [label]},
        outputs={'Y': [loss]},
        attrs=kwargs)
    return loss
def square_error_cost(input, label, **kwargs):
    """Return the squared error (input - label)^2, built from an
    elementwise_sub op followed by a square op."""
    helper = LayerHelper('square_error_cost', **kwargs)
    diff = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='elementwise_sub',
        inputs={'X': [input],
                'Y': [label]},
        outputs={'Out': [diff]})
    squared = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='square', inputs={'X': [diff]}, outputs={'Y': [squared]})
    return squared
def accuracy(input, label, k=1, correct=None, total=None, **kwargs):
    """
    Compute the top-k accuracy of the predictions *input* against *label*.

    A top_k op first extracts the k best predictions and their indices,
    then an accuracy op compares them with the labels.  Returns the
    accuracy variable; `correct` and `total` counter variables are
    created when not supplied by the caller.
    """
    helper = LayerHelper("accuracy", **kwargs)
    topk_out = helper.create_tmp_variable(dtype=input.dtype)
    topk_indices = helper.create_tmp_variable(dtype="int64")
    helper.append_op(
        type="top_k",
        inputs={"X": [input]},
        outputs={"Out": [topk_out],
                 "Indices": [topk_indices]},
        attrs={"k": k})
    acc_out = helper.create_tmp_variable(dtype="float32")
    if correct is None:
        correct = helper.create_tmp_variable(dtype="int64")
    if total is None:
        total = helper.create_tmp_variable(dtype="int64")
    helper.append_op(
        type="accuracy",
        inputs={
            "Out": [topk_out],
            "Indices": [topk_indices],
            "Label": [label]
        },
        outputs={
            "Accuracy": [acc_out],
            "Correct": [correct],
            "Total": [total],
        })
    return acc_out
def chunk_eval(input,
               label,
               chunk_scheme,
               num_chunk_types,
               excluded_chunk_types=None,
               **kwargs):
    """
    Append a chunk_eval operator scoring chunk predictions in *input*
    against *label*.

    Returns a (precision, recall, f1_score) triple of float32 variables.
    """
    helper = LayerHelper("chunk_eval", **kwargs)
    # One float32 output variable per chunking metric.
    precision = helper.create_tmp_variable(dtype="float32")
    recall = helper.create_tmp_variable(dtype="float32")
    f1_score = helper.create_tmp_variable(dtype="float32")
    helper.append_op(
        type="chunk_eval",
        inputs={"Inference": [input],
                "Label": [label]},
        outputs={
            "Precision": [precision],
            "Recall": [recall],
            "F1-Score": [f1_score]
        },
        attrs={
            "num_chunk_types": num_chunk_types,
            'chunk_scheme': chunk_scheme,
            'excluded_chunk_types': excluded_chunk_types or []
        })
    return precision, recall, f1_score
def sequence_conv(input,
                  num_filters,
                  filter_size=3,
                  filter_stride=1,
                  padding=None,
                  bias_attr=None,
                  param_attr=None,
                  act=None,
                  main_program=None,
                  startup_program=None):
    """
    Append a sequence_conv operator over *input* with the given filter
    configuration, followed by a bias add and an optional activation.
    """
    # NOTE: some arguments (e.g. padding) are accepted for interface
    # stability but not forwarded; attributes such as padding_trainable
    # and context_start are intentionally not exposed here.
    helper = LayerHelper('sequence_conv', **locals())
    dtype = helper.input_dtype()
    weight_shape = [filter_size * input.shape[1], num_filters]
    weight = helper.create_parameter(
        attr=helper.param_attr, shape=weight_shape, dtype=dtype)
    conv_out = helper.create_tmp_variable(dtype)
    helper.append_op(
        type='sequence_conv',
        inputs={
            'X': [input],
            'Filter': [weight],
        },
        outputs={"Out": conv_out},
        attrs={
            'contextStride': filter_stride,
            'contextStart': -int(filter_size / 2),
            'contextLength': filter_size
        })
    biased_out = helper.append_bias_op(conv_out)
    return helper.append_activation(biased_out)
def conv2d(input,
           num_filters,
           filter_size,
           stride=[1, 1],
           padding=None,
           groups=None,
           param_attr=None,
           bias_attr=None,
           act=None,
           name=None,
           main_program=None,
           startup_program=None):
    """
    Append a 2-D convolution (conv2d_cudnn op) over *input*, followed by
    a bias add and an optional activation.

    Scalar filter_size/stride/padding values are expanded to 2-element
    lists. When *groups* is given the input channels are split into that
    many groups; num_channels must divide evenly by groups.
    """
    helper = LayerHelper('conv2d', **locals())
    dtype = helper.input_dtype()
    # assumes input is laid out NCHW (channel is dim 1) -- TODO confirm
    num_channels = input.shape[1]
    if groups is None:
        num_filter_channels = num_channels
    else:
        if num_channels % groups != 0:
            raise ValueError("num_channels must be divisible by groups.")
        # Floor division keeps the channel count an integer under
        # Python 3's true division; the remainder is zero here, so the
        # value is unchanged from the original `/`.
        num_filter_channels = num_channels // groups
    if isinstance(filter_size, int):
        filter_size = [filter_size, filter_size]
    if isinstance(stride, int):
        stride = [stride, stride]
    if isinstance(padding, int):
        padding = [padding, padding]
    filter_shape = [num_filters, num_filter_channels] + filter_size

    def _get_default_param_initializer():
        # Std is derived from the filter's fan-in (He-style scaling).
        std = (2.0 / (filter_size[0]**2 * num_channels))**0.5
        return Normal(0.0, std, 0)

    # Renamed from `filter` to avoid shadowing the builtin.
    filter_param = helper.create_parameter(
        attr=helper.param_attr,
        shape=filter_shape,
        dtype=dtype,
        default_initializer=_get_default_param_initializer())
    pre_bias = helper.create_tmp_variable(dtype)
    helper.append_op(
        type='conv2d_cudnn',
        inputs={
            'Input': input,
            'Filter': filter_param,
        },
        outputs={"Output": pre_bias},
        attrs={'strides': stride,
               'paddings': padding,
               'groups': groups})
    # Bias is applied over the channel dimension (dim 1).
    pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    return helper.append_activation(pre_act)
def sequence_pool(input, pool_type, **kwargs):
    """
    Append a sequence_pool operator that pools *input* over its sequence
    steps using *pool_type*; returns the pooled variable.
    """
    helper = LayerHelper('sequence_pool', input=input, **kwargs)
    dtype = helper.input_dtype()
    pooled = helper.create_tmp_variable(dtype)
    max_idx = helper.create_tmp_variable(dtype)
    helper.append_op(
        type="sequence_pool",
        inputs={"X": input},
        outputs={"Out": pooled,
                 "MaxIndex": max_idx},
        attrs={"pooltype": pool_type.upper()})
    return pooled
def pool2d(input,
           pool_size,
           pool_type,
           pool_stride=[1, 1],
           pool_padding=[0, 0],
           global_pooling=False,
           main_program=None,
           startup_program=None):
    """
    Append a 2-D pooling operator over *input*.

    Args:
        input: the variable to pool over.
        pool_size: window size; an int is expanded to a square window.
        pool_type: either "max" or "avg".
        pool_stride: window stride; an int is expanded to both dims.
        pool_padding: padding; an int is expanded to both dims.
        global_pooling: when True, pool over the whole spatial extent.

    Raises:
        ValueError: if pool_type is not "max" or "avg".
    """
    if pool_type not in ["max", "avg"]:
        # Bug fix: the message and pool_type were previously passed as
        # two separate ValueError arguments, so '%s' was never
        # substituted; apply %-formatting explicitly.
        raise ValueError(
            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))
    if isinstance(pool_size, int):
        pool_size = [pool_size, pool_size]
    if isinstance(pool_stride, int):
        pool_stride = [pool_stride, pool_stride]
    if isinstance(pool_padding, int):
        pool_padding = [pool_padding, pool_padding]
    helper = LayerHelper('pool2d', **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_tmp_variable(dtype)
    helper.append_op(
        type="pool2d",
        inputs={"X": input},
        outputs={"Out": pool_out},
        attrs={
            "pooling_type": pool_type,
            "ksize": pool_size,
            "global_pooling": global_pooling,
            "strides": pool_stride,
            "paddings": pool_padding
        })
    return pool_out
def batch_norm(input,
               act=None,
               is_test=False,
               momentum=0.9,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               data_layout='NCHW',
               main_program=None,
               startup_program=None):
    """
    This function helps create an operator to implement
    the BatchNorm layer using the configurations from the input parameters.

    Scale/bias are trainable per-channel parameters; the running mean and
    variance are persistable globals updated in place by the op. The
    result optionally has activation *act* appended.
    """
    helper = LayerHelper('batch_norm', **locals())
    dtype = helper.input_dtype()
    input_shape = input.shape
    # Channel axis depends on data layout: dim 1 for NCHW, last for NHWC.
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    else:
        if data_layout == 'NHWC':
            channel_num = input_shape[-1]
        else:
            raise ValueError("unsupported data layout:" + data_layout)
    param_shape = [channel_num]
    # create parameter
    scale = helper.create_parameter(
        attr=helper.param_attr,
        shape=param_shape,
        dtype=dtype,
        default_initializer=Constant(1.0))
    bias = helper.create_parameter(
        attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=True)
    # Persistable running statistics, initialized to mean 0 / variance 1.
    mean = helper.create_global_variable(
        dtype=input.dtype, shape=param_shape, persistable=True)
    helper.set_variable_initializer(var=mean, initializer=Constant(0.0))
    variance = helper.create_global_variable(
        dtype=input.dtype, shape=param_shape, persistable=True)
    helper.set_variable_initializer(var=variance, initializer=Constant(1.0))
    # create output
    # mean and mean_out share the same memory
    mean_out = mean
    # variance and variance out share the same memory
    variance_out = variance
    saved_mean = helper.create_tmp_variable(dtype)
    saved_variance = helper.create_tmp_variable(dtype)
    batch_norm_out = helper.create_tmp_variable(dtype)
    helper.append_op(
        type="batch_norm",
        inputs={
            "X": input,
            "Scale": scale,
            "Bias": bias,
            "Mean": mean,
            "Variance": variance
        },
        outputs={
            "Y": batch_norm_out,
            "MeanOut": mean_out,
            "VarianceOut": variance_out,
            "SavedMean": saved_mean,
            "SavedVariance": saved_variance
        },
        attrs={"momentum": momentum,
               "epsilon": epsilon,
               "is_test": is_test})
    return helper.append_activation(batch_norm_out)
def beam_search_decode(ids, scores, main_program=None, startup_program=None):
    """
    Append a beam_search_decode operator that turns beam-search *ids*
    and *scores* into whole sentences.

    Returns a (sentence_ids, sentence_scores) pair of variables.
    """
    helper = LayerHelper('beam_search_decode', **locals())
    # NOTE: both outputs use ids.dtype, mirroring the op's contract.
    out_ids = helper.create_tmp_variable(dtype=ids.dtype)
    out_scores = helper.create_tmp_variable(dtype=ids.dtype)
    helper.append_op(
        type="beam_search_decode",
        inputs={"Ids": ids,
                "Scores": scores},
        outputs={
            "SentenceIds": out_ids,
            "SentenceScores": out_scores
        })
    return out_ids, out_scores
class BlockGuard(object):
    """
    Context manager that opens a fresh sub-block of *main_program* on
    entry and rolls back to the enclosing block on exit. Exceptions
    raised inside the `with` body are propagated to the caller.
    """

    def __init__(self, main_program):
        if not isinstance(main_program, Program):
            raise TypeError("BlockGuard takes a program")
        self.main_program = main_program

    def __enter__(self):
        self.main_program.create_block()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.main_program.rollback()
        # Truthy return suppresses exceptions; only suppress when there
        # was none (equivalent to the original True/False branches).
        return exc_type is None
class StaticRNNGuard(BlockGuard):
    """
    StaticRNNGuard class.
    StaticRNNGuard class is used to create a StaticRNN block in a program.
    Entering opens the RNN's step sub-block; a clean exit finalizes the
    RNN (emits the recurrent op) before rolling the block back.
    """
    def __init__(self, rnn):
        if not isinstance(rnn, StaticRNN):
            raise TypeError("StaticRNNGuard takes a StaticRNN")
        super(StaticRNNGuard, self).__init__(rnn.helper.main_program)
        self.rnn = rnn
    def __enter__(self):
        # Allow step_input/step_output/memory while the block is open.
        self.rnn.status = StaticRNN.IN_RNN_BLOCK
        return super(StaticRNNGuard, self).__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        # On exception, propagate without completing the RNN; note the
        # parent rollback is intentionally skipped on this path.
        if exc_type is not None:
            return False
        self.rnn.status = StaticRNN.AFTER_RNN_BLOCK
        # Emit the final `recurrent` op into the parent block.
        self.rnn.complete_rnn_op()
        return super(StaticRNNGuard, self).__exit__(exc_type, exc_val, exc_tb)
class StaticRNNMemoryLink(object):
    """
    Link between the variables that make up one StaticRNN memory cell.

    Attributes:
        init (Variable): the initial (boot) value of the memory.
        pre_mem (Variable): the memory as seen from the previous step.
        mem (Variable): the memory produced by the current step; filled
            in later via StaticRNN.update_memory.
    """

    def __init__(self, init, pre_mem, mem=None):
        self.init = init
        self.pre_mem = pre_mem
        self.mem = mem
class StaticRNN(object):
    """
    StaticRNN class.
    StaticRNN class is used to create a StaticRNN. The RNN will have its
    own parameters like inputs, outputs, memories, status and length.
    Step ops are collected inside `with rnn.step():`; closing the block
    emits one `recurrent` op into the parent block.
    """
    # Lifecycle states: step ops may only be added while IN_RNN_BLOCK,
    # and outputs may only be read AFTER_RNN_BLOCK.
    BEFORE_RNN_BLOCK = 0
    IN_RNN_BLOCK = 1
    AFTER_RNN_BLOCK = 2
    def __init__(self, name=None, main_program=None):
        self.helper = LayerHelper(
            "static_rnn", name=name, main_program=main_program)
        self.memories = {}  # memory map, from pre_mem.name --> MemoryLink
        self.inputs = []  # input variable list in current block
        self.outputs = []  # output variable list in parent block
        self.status = StaticRNN.BEFORE_RNN_BLOCK  # status flag.
        # sequence length, since it is a static RNN, sequence length are fixed.
        self.seq_len = None
    def step(self):
        """Return a guard opening the RNN step block (use with `with`)."""
        return StaticRNNGuard(self)
    def _assert_in_rnn_block_(self, method):
        # Guard: *method* may only be called inside `with rnn.step():`.
        if self.status != StaticRNN.IN_RNN_BLOCK:
            raise ValueError("You must invoke {0} in rnn block".format(method))
    def memory(self,
               init=None,
               shape=None,
               batch_ref=None,
               init_value=0.0,
               init_batch_dim_idx=0,
               ref_batch_dim_idx=1):
        """
        Create a recurrent memory cell inside the step block.

        Args:
            init: boot memory, if not set, a shape, batch_ref must be provided
            shape: shape of the boot memory
            batch_ref: batch size reference variable
            init_value: the init value of boot memory
            init_batch_dim_idx: the index of batch size in init's dimension
            ref_batch_dim_idx: the index of batch size in batch_ref's dimension

        Returns the "previous step" variable to read inside the block.
        """
        self._assert_in_rnn_block_('memory')
        if init is None:
            if shape is None or batch_ref is None:
                raise ValueError(
                    "if init is None, memory at least need shape and batch_ref")
            parent_block = self.parent_block()
            var_name = unique_name("@".join([self.helper.name, "memory_boot"]))
            boot_var = parent_block.create_var(
                name=var_name,
                shape=shape,
                dtype=batch_ref.dtype,
                persistable=False)
            # Fill the boot memory in the parent block so it exists
            # before the first step; its batch dim tracks batch_ref.
            parent_block.append_op(
                type="fill_constant_batch_size_like",
                inputs={'Input': [batch_ref]},
                outputs={'Out': [boot_var]},
                attrs={
                    'value': init_value,
                    'shape': boot_var.shape,
                    'dtype': boot_var.dtype,
                    'input_dim_idx': ref_batch_dim_idx,
                    'output_dim_idx': init_batch_dim_idx
                })
            # Recurse with the freshly created boot variable as `init`.
            return self.memory(init=boot_var)
        else:
            # Create the in-block "previous step" variable and remember
            # the link from it back to its initializer.
            pre_mem = self.helper.create_variable(
                name=unique_name("@".join([self.helper.name, "mem"])),
                dtype=init.dtype,
                shape=init.shape)
            self.memories[pre_mem.name] = StaticRNNMemoryLink(
                init=init, pre_mem=pre_mem)
            return pre_mem
    def step_input(self, x):
        """Register *x* as a per-step input; returns the per-step slice."""
        self._assert_in_rnn_block_('step_input')
        if not isinstance(x, Variable):
            raise TypeError("step input takes a Variable")
        # All step inputs must share the same (static) sequence length,
        # taken from the leading dimension.
        if self.seq_len is None:
            self.seq_len = x.shape[0]
        elif self.seq_len != x.shape[0]:
            raise ValueError("Static RNN only take fix seq_len input")
        ipt = self.helper.create_variable(
            name=x.name, dtype=x.dtype, shape=list(x.shape[1:]), type=x.type)
        self.inputs.append(ipt)
        return ipt
    def step_output(self, o):
        """Register *o* as a per-step output of the RNN."""
        self._assert_in_rnn_block_('step_output')
        if not isinstance(o, Variable):
            raise TypeError("step output takes a Variable")
        tmp_o = self.helper.create_tmp_variable(dtype=o.dtype)
        self.helper.append_op(
            type='rnn_memory_helper',
            inputs={'X': [o]},
            outputs={'Out': tmp_o},
            attrs={'dtype': o.dtype})
        # Matching parent-block variable gains a leading time dimension.
        out_var = self.parent_block().create_var(
            name=tmp_o.name,
            shape=[self.seq_len] + list(tmp_o.shape),
            dtype=tmp_o.dtype)
        self.outputs.append(out_var)
    def output(self, *outputs):
        """Convenience wrapper: register several step outputs at once."""
        for each in outputs:
            self.step_output(each)
    def update_memory(self, mem, var):
        """Link memory *mem* (from memory()) to its next-step value *var*."""
        if not isinstance(mem, Variable) or not isinstance(var, Variable):
            raise TypeError("update memory should take variables")
        self.memories[mem.name].mem = var
    def parent_block(self):
        # The block that encloses the RNN step block.
        prog = self.helper.main_program
        parent_idx = prog.current_block().parent_idx
        assert parent_idx >= 0
        parent_block = prog.block(parent_idx)
        return parent_block
    def __call__(self, *args, **kwargs):
        # Retrieve the RNN's output(s); only valid once the step block
        # has been closed.
        if self.status != StaticRNN.AFTER_RNN_BLOCK:
            raise ValueError("RNN output can only be retrieved after rnn block")
        if len(self.outputs) == 0:
            raise ValueError("RNN has no output")
        elif len(self.outputs) == 1:
            return self.outputs[0]
        else:
            return self.outputs
    def complete_rnn_op(self):
        """Emit the final `recurrent` op into the parent block."""
        main_program = self.helper.main_program
        rnn_block = main_program.current_block()
        parent_block = self.parent_block()
        # Names produced inside the step block (op outputs, declared
        # inputs, memories); anything else read by the block's ops must
        # be a parameter captured from the parent scope.
        local_inputs = set()
        for op in rnn_block.ops:
            assert isinstance(op, Operator)
            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    local_inputs.add(out_var_name)
        for var in self.inputs:
            local_inputs.add(var.name)
        for m in self.memories:
            local_inputs.add(m)
        params = list()
        for op in rnn_block.ops:
            assert isinstance(op, Operator)
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    if in_var_name not in local_inputs:
                        params.append(in_var_name)
        parameters = [parent_block.var(name) for name in params]
        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)
        inlinks = [parent_block.var(i.name) for i in self.inputs]
        outlinks = self.outputs
        boot_memories = []
        pre_memories = []
        memories = []
        for _, mem in self.memories.iteritems():
            boot_memories.append(mem.init)
            pre_memories.append(mem.pre_mem.name)
            # Route each memory's next-step value through a
            # rnn_memory_helper op so the recurrent op can track it.
            mem_var = rnn_block.var(mem.mem.name)
            assert isinstance(mem_var, Variable)
            new_mem = self.helper.create_tmp_variable(dtype=mem_var.dtype)
            rnn_block.append_op(
                type='rnn_memory_helper',
                inputs={'X': [mem_var]},
                outputs={'Out': [new_mem]},
                attrs={'dtype': mem_var.dtype})
            memories.append(new_mem.name)
        parent_block.append_op(
            type='recurrent',
            inputs={
                'inputs': inlinks,
                'initial_states': boot_memories,
                'parameters': parameters
            },
            outputs={'outputs': outlinks,
                     'step_scopes': [step_scope]},
            attrs={
                'ex_states': pre_memories,
                'states': memories,
                'step_block': rnn_block
            })
class WhileGuard(BlockGuard):
    # BlockGuard driving a While op's lifecycle: entering opens the body
    # sub-block, a clean exit finalizes the loop via While.complete().
    def __init__(self, while_op):
        if not isinstance(while_op, While):
            raise TypeError("WhileGuard takes a while op")
        super(WhileGuard, self).__init__(while_op.helper.main_program)
        self.while_op = while_op
    def __enter__(self):
        self.while_op.status = While.IN_WHILE_BLOCK
        return super(WhileGuard, self).__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        # On exception, propagate without emitting the while op (note:
        # the parent rollback is skipped on this path).
        if exc_type is not None:
            return False
        self.while_op.status = While.AFTER_WHILE_BLOCK
        # Emit the `while` operator into the parent block, then roll back.
        self.while_op.complete()
        return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)
class While(object):
    # Control-flow builder: ops added inside `with While(cond).block():`
    # form the loop body; complete() then emits a single `while` op into
    # the parent block.
    BEFORE_WHILE_BLOCK = 0
    IN_WHILE_BLOCK = 1
    AFTER_WHILE_BLOCK = 2
    def __init__(self, cond, name=None, main_program=None):
        self.helper = LayerHelper("while", name=name, main_program=main_program)
        self.status = While.BEFORE_WHILE_BLOCK
        if not isinstance(cond, Variable):
            raise TypeError("condition should be a variable")
        assert isinstance(cond, Variable)
        # The loop condition must be a single boolean element.
        if cond.dtype != core.DataType.BOOL:
            raise TypeError("condition should be a bool variable")
        if reduce(lambda a, b: a * b, cond.shape, 1) != 1:
            raise TypeError("condition should be a bool scalar")
        self.cond_var = cond
    def block(self):
        """Return the context manager that opens the loop-body block."""
        return WhileGuard(self)
    def complete(self):
        """Emit the `while` op for the collected body into the parent."""
        main_program = self.helper.main_program
        while_block = main_program.current_block()
        parent_block = main_program.block(main_program.current_block()
                                          .parent_idx)
        # Names produced inside the body, seeded with the condition;
        # anything read before being produced is an external input ('X').
        inner_outputs = {self.cond_var.name}
        x_name_list = set()
        for op in while_block.ops:
            for iname in op.input_names:
                for in_var_name in op.input(iname):
                    if in_var_name not in inner_outputs:
                        x_name_list.add(in_var_name)
            for oname in op.output_names:
                for out_var_name in op.output(oname):
                    inner_outputs.add(out_var_name)
        # Only body outputs that also exist in the parent scope are
        # exposed as outputs of the while op.
        out_vars = []
        for inner_out_name in inner_outputs:
            if inner_out_name in parent_block.vars:
                out_vars.append(parent_block.var(inner_out_name))
        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)
        parent_block.append_op(
            type='while',
            inputs={
                'X': [parent_block.var(x_name) for x_name in x_name_list],
                'Condition': [self.cond_var]
            },
            outputs={'Out': out_vars,
                     'StepScopes': [step_scope]},
            attrs={'step_block': while_block})
def lstm(x,
         c_pre_init,
         hidden_dim,
         forget_bias=None,
         main_program=None,
         startup_program=None):
    """
    Build an unrolled LSTM over *x* using a StaticRNN: every step
    concatenates the step input with the previous cell state, projects
    the result to 4 * hidden_dim with a fully-connected layer, and feeds
    it to an lstm_unit op. Returns the RNN's hidden-state output.
    """
    helper = LayerHelper('lstm_unit', **locals())
    rnn = StaticRNN()
    with rnn.step():
        prev_cell = rnn.memory(init=c_pre_init)
        step_in = rnn.step_input(x)
        gate_in = concat(
            input=[step_in, prev_cell],
            axis=1,
            main_program=main_program,
            startup_program=startup_program)
        projected = fc(input=gate_in,
                       size=hidden_dim * 4,
                       main_program=main_program,
                       startup_program=startup_program)
        dtype = x.dtype
        cell = helper.create_tmp_variable(dtype)
        hidden = helper.create_tmp_variable(dtype)
        helper.append_op(
            type='lstm_unit',
            inputs={"X": projected,
                    "C_prev": prev_cell},
            outputs={"C": cell,
                     "H": hidden},
            attrs={"forget_bias": forget_bias})
        rnn.update_memory(prev_cell, cell)
        rnn.output(hidden)
    return rnn()
def lod_rank_table(x, level=0, main_program=None):
    """
    Append a lod_rank_table operator building a LOD_RANK_TABLE variable
    from LoD level *level* of *x*.
    """
    helper = LayerHelper("lod_rank_table", **locals())
    rank_table = helper.create_variable(
        type=core.VarDesc.VarType.LOD_RANK_TABLE,
        name=unique_name("lod_rank_table"))
    helper.append_op(
        type='lod_rank_table',
        inputs={'X': x},
        outputs={'Out': rank_table},
        attrs={'level': level})
    return rank_table
def max_sequence_len(rank_table, main_program=None):
    """
    Append a max_sequence_len operator that returns, as an int64 scalar
    variable, the longest sequence length recorded in *rank_table*
    (the output of lod_rank_table()).
    """
    helper = LayerHelper("max_seqence_len", **locals())
    result = helper.create_tmp_variable(dtype="int64")
    helper.append_op(
        type="max_sequence_len",
        inputs={"RankTable": rank_table},
        outputs={"Out": result})
    return result
def topk(input, k, main_program=None, startup_program=None):
    """
    Append a top_k operator selecting the *k* largest entries of *input*.

    Returns:
        A (values, indices) pair: the top-k values (same dtype as
        *input*) and their int64 indices.
    """
    helper = LayerHelper('topk', **locals())
    # Fix: use `input.dtype` — `data_type` is the stale pre-rename
    # attribute spelling; every other layer in this module reads `dtype`.
    topk_out = helper.create_tmp_variable(dtype=input.dtype)
    topk_indices = helper.create_tmp_variable(dtype='int64')
    helper.append_op(
        type='top_k',
        inputs={'X': [input]},
        outputs={'Out': [topk_out],
                 'Indices': [topk_indices]},
        attrs={'k': k})
    return topk_out, topk_indices
def lod_tensor_to_array(x, table, main_program=None):
    """
    Append a lod_tensor_to_array operator that splits LoDTensor *x*
    into a LOD_TENSOR_ARRAY ordered by the rank *table*.
    """
    helper = LayerHelper("lod_tensor_to_array", **locals())
    tensor_array = helper.create_variable(
        name=unique_name("lod_tensor_to_array"),
        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
        dtype=x.dtype)
    helper.append_op(
        type='lod_tensor_to_array',
        inputs={'X': x,
                'RankTable': table},
        outputs={'Out': tensor_array})
    return tensor_array
def array_to_lod_tensor(x, table, main_program=None, startup_program=None):
    """
    Append an array_to_lod_tensor operator that reassembles the array
    *x* into a LoDTensor using the rank *table*.
    """
    helper = LayerHelper("array_to_lod_tensor", **locals())
    result = helper.create_tmp_variable(dtype=x.dtype)
    helper.append_op(
        type="array_to_lod_tensor",
        inputs={'X': x,
                'RankTable': table},
        outputs={'Out': result})
    return result
def fill_constant(shape,
                  dtype,
                  value,
                  out=None,
                  main_program=None,
                  startup_program=None):
    """
    Append a fill_constant operator producing a tensor of *shape* and
    *dtype* filled with *value*. A temporary variable is created when
    *out* is not supplied; its stop_gradient flag is set to True.
    """
    helper = LayerHelper("fill_constant", **locals())
    if out is None:
        out = helper.create_tmp_variable(dtype=dtype)
    helper.append_op(
        type='fill_constant',
        inputs={},
        outputs={'Out': [out]},
        attrs={
            'shape': shape,
            'dtype': out.dtype,
            'value': float(value)
        })
    out.stop_gradient = True
    return out
def fill_constant_batch_size_like(input,
                                  shape,
                                  dtype,
                                  value,
                                  input_dim_idx=0,
                                  output_dim_idx=0,
                                  main_program=None,
                                  startup_program=None):
    """
    Append a fill_constant_batch_size_like operator: like fill_constant,
    except dimension *output_dim_idx* of the result is copied from
    dimension *input_dim_idx* of *input*'s runtime shape. The result's
    stop_gradient flag is set to True.
    """
    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    filled = helper.create_tmp_variable(dtype=dtype)
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [filled]},
        attrs={
            'shape': shape,
            'dtype': filled.dtype,
            'value': float(value),
            'input_dim_idx': input_dim_idx,
            'output_dim_idx': output_dim_idx
        })
    filled.stop_gradient = True
    return filled
def ones(shape, dtype, main_program=None):
    """Create a constant tensor of *shape* and *dtype* filled with 1.0."""
    return fill_constant(
        shape=shape, dtype=dtype, value=1.0, main_program=main_program)
def zeros(shape, dtype, main_program=None):
    """Create a constant tensor of *shape* and *dtype* filled with 0.0."""
    return fill_constant(
        shape=shape, dtype=dtype, value=0.0, main_program=main_program)
def increment(x,
              value=1.0,
              in_place=True,
              main_program=None,
              startup_program=None):
    """
    Append an increment operator adding *value* to every entry of *x*.

    By default the update is in place (the returned variable is *x*
    itself); pass in_place=False to write into a fresh temporary.
    """
    helper = LayerHelper("increment", **locals())
    out = x if in_place else helper.create_tmp_variable(dtype=x.dtype)
    helper.append_op(
        type='increment',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'step': float(value)})
    return out
def array_write(x, i, array=None, main_program=None, startup_program=None):
    """
    Append a write_to_array operator storing *x* at index *i* of a
    LOD_TENSOR_ARRAY; the array is created when none is supplied.
    """
    helper = LayerHelper('array_write', **locals())
    if array is None:
        array = helper.create_variable(
            name="{0}.out".format(helper.name),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype)
    helper.append_op(
        type='write_to_array',
        inputs={'X': [x],
                'I': [i]},
        outputs={'Out': [array]})
    return array
def create_array(dtype, main_program=None):
    """Create an empty LOD_TENSOR_ARRAY variable of the given *dtype*."""
    helper = LayerHelper("array", **locals())
    return helper.create_variable(
        name="{0}.out".format(helper.name),
        type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
        dtype=dtype)
def less_than(x, y, cond=None, main_program=None, **ignored):
    """
    Append a less_than operator computing element-wise x < y into the
    bool variable *cond* (created as a non-gradient temporary when
    omitted).
    """
    helper = LayerHelper("less_than", **locals())
    if cond is None:
        cond = helper.create_tmp_variable(dtype='bool')
        cond.stop_gradient = True
    helper.append_op(
        type='less_than',
        inputs={'X': [x],
                'Y': [y]},
        outputs={'Out': [cond]})
    return cond
def array_read(array, i, main_program=None, startup_program=None):
    """
    Append a read_from_array operator fetching element *i* from a
    LOD_TENSOR_ARRAY variable.
    """
    helper = LayerHelper('array_read', **locals())
    is_tensor_array = (
        isinstance(array, Variable) and
        array.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY)
    if not is_tensor_array:
        raise TypeError("array should be tensor array vairable")
    out = helper.create_tmp_variable(dtype=array.dtype)
    helper.append_op(
        type='read_from_array',
        inputs={'X': [array],
                'I': [i]},
        outputs={'Out': [out]})
    return out
def shrink_memory(x, i, table, main_program=None, startup_program=None):
    """
    Append a shrink_rnn_memory operator trimming RNN memory *x* at step
    *i* according to the LoD rank *table*.
    """
    helper = LayerHelper('shrink_memory', **locals())
    shrunk = helper.create_tmp_variable(dtype=x.dtype)
    helper.append_op(
        type='shrink_rnn_memory',
        inputs={'X': [x],
                'I': [i],
                'RankTable': [table]},
        outputs={'Out': [shrunk]},
        attrs={})
    return shrunk
def array_length(array, main_program=None):
    """
    Append a lod_array_length operator that yields the number of
    elements in *array* as an int64 scalar (no gradient).
    """
    helper = LayerHelper('array_length', **locals())
    length = helper.create_tmp_variable(dtype='int64')
    length.stop_gradient = True
    helper.append_op(
        type='lod_array_length',
        inputs={'X': [array]},
        outputs={'Out': [length]})
    return length
def conv2d_transpose(input,
                     num_filters,
                     output_size=None,
                     filter_size=None,
                     padding=None,
                     stride=None,
                     param_attr=None,
                     main_program=None,
                     startup_program=None):
    """
    The transpose of conv2d layer.
    This layer is also known as deconvolution layer.
    Args:
        input(Variable): The input image with [N, C, H, W] format.
        num_filters(int): The number of filter. It is as same as the output
            image channel.
        output_size(int|tuple|None): The output image size. If output size is a
            tuple, it must contain two integers, (image_H, image_W). This
            parameter only works when filter_size is None.
        filter_size(int|tuple|None): The filter size. If filter_size is a tuple,
            it must contain two integers, (filter_size_H, filter_size_W).
            Otherwise, the filter will be a square. None if use output size to
            calculate filter_size
        padding(int|tuple): The padding size. If padding is a tuple, it must
            contain two integers, (padding_H, padding_W). Otherwise, the
            padding_H = padding_W = padding.
        stride(int|tuple): The stride size. If stride is a tuple, it must
            contain two integers, (stride_H, stride_W). Otherwise, the
            stride_H = stride_W = stride.
        param_attr: Parameter Attribute.
        main_program(Program): the main program
        startup_program(Program): the startup program
    Returns:
        Variable: Output image.
    """
    helper = LayerHelper("conv2d_transpose", **locals())
    if not isinstance(input, Variable):
        raise TypeError("Input of conv2d_transpose must be Variable")
    input_channel = input.shape[1]
    op_attr = dict()
    if isinstance(padding, int):
        op_attr['paddings'] = [padding, padding]
    elif padding is not None:
        op_attr['paddings'] = padding
    if isinstance(stride, int):
        # Bug fix: an int stride must be expanded to [stride, stride];
        # storing the bare scalar broke the stride[0]/stride[1] indexing
        # below and the operator's list-typed 'strides' attribute.
        op_attr['strides'] = [stride, stride]
    elif stride is not None:
        op_attr['strides'] = stride
    if filter_size is None:
        if output_size is None:
            raise ValueError("output_size must be set when filter_size is None")
        if isinstance(output_size, int):
            output_size = [output_size, output_size]
        padding = op_attr.get('paddings', [0, 0])
        stride = op_attr.get('strides', [1, 1])
        h_in = input.shape[2]
        w_in = input.shape[3]
        # Invert the transposed-conv shape formula
        # out = (in - 1) * stride - 2 * pad + filter
        # to derive the filter size from the requested output size.
        filter_size_h = output_size[0] - (h_in - 1) * stride[0] + 2 * padding[0]
        filter_size_w = output_size[1] - (w_in - 1) * stride[1] + 2 * padding[1]
        filter_size = [filter_size_h, filter_size_w]
    elif isinstance(filter_size, int):
        filter_size = [filter_size, filter_size]
    filter_shape = [input_channel, num_filters] + filter_size
    img_filter = helper.create_parameter(
        dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)
    out = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='conv2d_transpose',
        inputs={'Input': [input],
                'Filter': [img_filter]},
        outputs={'Output': out},
        attrs=op_attr)
    return out
class ConditionalBlockGuard(BlockGuard):
    # Guard opening a ConditionalBlock's sub-block; unlike the RNN/While
    # guards, it always calls complete() on exit, even when an exception
    # escaped the body.
    def __init__(self, block):
        if not isinstance(block, ConditionalBlock):
            raise TypeError("block should be conditional block")
        super(ConditionalBlockGuard, self).__init__(block.helper.main_program)
        self.block = block
    def __enter__(self):
        return super(ConditionalBlockGuard, self).__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Emit the conditional_block op into the parent, then roll back.
        self.block.complete()
        return super(ConditionalBlockGuard, self).__exit__(exc_type, exc_val,
                                                           exc_tb)
class ConditionalBlock(object):
    # Builds a `conditional_block` op: ops added inside `with cb.block():`
    # form a sub-block; complete() wires that sub-block into the parent.
    def __init__(self,
                 inputs,
                 name=None,
                 main_program=None,
                 startup_program=None):
        for each_input in inputs:
            if not isinstance(each_input, Variable):
                raise TypeError("Each input should be variable")
        self.inputs = inputs
        self.helper = LayerHelper(
            'conditional_block',
            name=name,
            main_program=main_program,
            startup_program=startup_program)
    def block(self):
        """Return the context manager that opens the conditional block."""
        return ConditionalBlockGuard(self)
    def complete(self):
        """Emit the conditional_block op for the collected sub-block."""
        inside_block = self.helper.main_program.current_block()
        parent_block = self.helper.main_program.block(inside_block.parent_idx)
        # `intermediate` collects names produced inside the block; names
        # read before being produced must be captured from outside.
        intermediate = set()
        params = set()
        for each_op in inside_block.ops:
            assert isinstance(each_op, Operator)
            for iname in each_op.input_names:
                for in_var_name in each_op.input(iname):
                    if in_var_name not in intermediate:
                        params.add(in_var_name)
            for oname in each_op.output_names:
                for out_var_name in each_op.output(oname):
                    intermediate.add(out_var_name)
        input_set = set([ipt.name for ipt in self.inputs])
        # External reads minus the explicit inputs become 'Params'.
        param_list = [
            parent_block.var(each_name) for each_name in params
            if each_name not in input_set
        ]
        # NOTE(review): this exposes every parent var not produced inside
        # the block as an output — looks like an over-approximation of
        # the real output set; confirm against the conditional_block op.
        out_list = [
            parent_block.var(var_name) for var_name in parent_block.vars
            if var_name not in intermediate
        ]
        step_scope = parent_block.create_var(
            type=core.VarDesc.VarType.STEP_SCOPES)
        parent_block.append_op(
            type='conditional_block',
            inputs={
                'X': self.inputs,
                'Params': param_list,
            },
            outputs={'Out': out_list,
                     'Scope': [step_scope]},
            attrs={'block': inside_block})
class IfElseBlockGuard(object):
    # Context manager for one branch (true or false) of an IfElse. It
    # delegates block handling to the branch's ConditionalBlock guard and
    # tracks the IfElse status so input()/output() know which branch is
    # currently active.
    def __init__(self, is_true, ifelse):
        if not isinstance(ifelse, IfElse):
            raise TypeError("ifelse must be an instance of IfElse class")
        if ifelse.status != IfElse.OUT_IF_ELSE_BLOCKS:
            raise ValueError("You cannot invoke IfElse.block() inside a block")
        self.is_true = is_true
        self.ie = ifelse
        if is_true:
            self.cond_block = ifelse.conditional_true_block
        else:
            self.cond_block = ifelse.conditional_false_block
        if not isinstance(self.cond_block, ConditionalBlock):
            raise TypeError("Unexpected situation")
        # Replace the ConditionalBlock with its (not yet entered) guard.
        self.cond_block = self.cond_block.block()
    def __enter__(self):
        self.ie.status = IfElse.IN_IF_ELSE_TRUE_BLOCKS if self.is_true else IfElse.IN_IF_ELSE_FALSE_BLOCKS
        self.cond_block.__enter__()
    def __exit__(self, exc_type, exc_val, exc_tb):
        if not self.cond_block.__exit__(exc_type, exc_val, exc_tb):
            # re-raise inside exception
            return False
        # Each branch must have registered at least one output.
        if len(self.ie.output_table[1 if self.is_true else 0]) == 0:
            raise ValueError("Must set output inside block")
        self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS
class IfElse(object):
OUT_IF_ELSE_BLOCKS = 0
IN_IF_ELSE_TRUE_BLOCKS = 1
IN_IF_ELSE_FALSE_BLOCKS = 2
def __init__(self, cond, name=None, main_program=None,
startup_program=None):
if not isinstance(cond, Variable):
raise TypeError("cond must be a Variable")
self.helper = LayerHelper(
'ifelse',
name=name,
main_program=main_program,
startup_program=startup_program)
self.cond = cond
self.input_table = {}
self.status = IfElse.OUT_IF_ELSE_BLOCKS
self.conditional_true_block = ConditionalBlock(inputs=[self.cond])
self.conditional_false_block = ConditionalBlock(inputs=[self.cond])
self.output_table = ([], []) # (true_outs, false_outs)
def input(self, x):
if self.status == IfElse.OUT_IF_ELSE_BLOCKS:
raise ValueError("input must in true/false blocks")
if id(x) not in self.input_table:
parent_block = self.parent_block()
out_true = parent_block.create_var(
name=unique_name('ifelse_input' + self.helper.name),
dtype=x.dtype)
out_false = parent_block.create_var(
name=unique_name('ifelse_input' + self.helper.name),
dtype=x.dtype)
parent_block.append_op(
type='split_lod_tensor',
inputs={
'X': x,
'Mask': self.cond,
},
outputs={'OutTrue': out_true,
'OutFalse': out_false},
attrs={'level': 0})
self.input_table[id(x)] = (out_true, out_false)
else:
out_true, out_false = self.input_table[id(x)]
if self.status == IfElse.IN_IF_ELSE_TRUE_BLOCKS:
return out_true
else:
return out_false
def parent_block(self):
current_block = self.helper.main_program.current_block()
return self.helper.main_program.block(current_block.parent_idx)
def true_block(self):
return IfElseBlockGuard(True, self)
def false_block(self):
return IfElseBlockGuard(False, self)
def output(self, *outs):
if self.status == self.OUT_IF_ELSE_BLOCKS:
raise ValueError("output can only be invoked in the sub-block")
out_table = self.output_table[1 if self.status ==
self.IN_IF_ELSE_TRUE_BLOCKS else 0]
parent_block = self.parent_block()
for each_out in outs:
if not isinstance(each_out, Variable):
raise TypeError("Each output should be a variable")
# create outside tensor
outside_out = parent_block.create_var(
name=unique_name("_".join([self.helper.name, 'output'])),
dtype=each_out.dtype)
out_table.append(outside_out)
# assign local var to outside
assign(
input=each_out,
output=outside_out,
main_program=self.helper.main_program,
startup_program=self.helper.startup_program)
    def __call__(self):
        """Finalize the if-else and return its merged output variables.

        Must be called after both sub-blocks are closed. If only one branch
        produced outputs, that branch's list is returned directly; otherwise
        each true/false output pair is merged with 'merge_lod_tensor' using
        ``self.cond`` as the mask, and the merged variables are returned in
        registration order.

        Raises ValueError when called inside a sub-block, when no outputs
        were registered, or when both branches registered outputs but their
        counts differ.
        """
        if self.status != self.OUT_IF_ELSE_BLOCKS:
            raise ValueError("IfElse::__call__ must be out of sub-block")
        # output_table is (false_outs, true_outs); see output().
        false_len, true_len = map(len, self.output_table)
        if false_len == 0 and true_len == 0:
            raise ValueError("Must invoke true_block/false_block before "
                             "__call__")
        elif false_len != true_len and false_len != 0 and true_len != 0:
            raise ValueError("The output side must be same")
        elif false_len == 0 or true_len == 0:
            return self.output_table[0 if false_len != 0 else 1]

        # else none of false_len/true_len is zero
        # merge together
        rlist = []
        for false_var, true_var in zip(*self.output_table):
            rlist.append(
                merge_lod_tensor(
                    in_true=true_var,
                    in_false=false_var,
                    mask=self.cond,
                    x=self.cond,
                    level=0,
                    main_program=self.helper.main_program,
                    startup_program=self.helper.startup_program))
        return rlist
class DynamicRNN(object):
    """Variable-length RNN builder backed by a While op.

    Usage: inside ``with rnn.block():`` call ``step_input()`` to feed
    sequence data, ``memory()``/``update_memory()`` to carry state across
    steps, and ``output()`` to record per-step results. After the block
    closes, calling the instance returns the collected outputs converted
    back to LoD tensors.
    """
    # Builder phases; most methods are only legal in one phase.
    BEFORE_RNN = 0
    IN_RNN = 1
    AFTER_RNN = 2

    def __init__(self, name=None, main_program=None, startup_program=None):
        self.helper = LayerHelper(
            'dynamic_rnn',
            name=name,
            main_program=main_program,
            startup_program=startup_program)
        self.status = DynamicRNN.BEFORE_RNN
        self.lod_rank_table = None   # created lazily by the first step_input()
        self.max_seq_len = None      # longest sequence length in the batch
        self.step_idx = None         # loop counter variable, created in block()
        self.zero_idx = fill_constant(shape=[1], value=0, dtype='int64')
        self.mem_dict = dict()       # memory var name -> backing tensor array
        self.output_array = []       # tensor arrays filled by output()
        self.outputs = []            # final LoD tensors, filled after the loop
        self.cond = self.helper.create_tmp_variable(dtype='bool')
        self.cond.stop_gradient = False
        self.while_op = While(self.cond)
        self.input_array = []        # (tensor array, dtype) per step input
        self.mem_link = []           # (new_mem, mem_array) pairs written back each step

    def step_input(self, x):
        """Declare *x* as a per-step sequence input and return the current
        step's slice of it.

        The first call also builds, in the parent block, the LoD rank table,
        the max-sequence-length variable, and the initial loop condition
        ``step_idx < max_seq_len``. Every call converts *x* into a tensor
        array ordered by the rank table and reads element ``step_idx``.
        """
        self._assert_in_rnn_block_("step_input")
        if not isinstance(x, Variable):
            raise TypeError(
                "step_input() can only take a Variable as its input")
        parent_block = self._parent_block_()
        if self.lod_rank_table is None:
            # First step input: set up the shared rank table, max_seq_len
            # and the while-loop entry condition in the parent block.
            self.lod_rank_table = parent_block.create_var(
                name=unique_name('lod_rank_table'),
                type=core.VarDesc.VarType.LOD_RANK_TABLE)
            self.lod_rank_table.stop_gradient = True
            parent_block.append_op(
                type='lod_rank_table',
                inputs={"X": x},
                outputs={"Out": self.lod_rank_table})
            self.max_seq_len = parent_block.create_var(
                name=unique_name('dynamic_rnn_max_seq_len'), dtype='int64')
            self.max_seq_len.stop_gradient = False
            parent_block.append_op(
                type='max_sequence_len',
                inputs={'RankTable': self.lod_rank_table},
                outputs={"Out": self.max_seq_len})
            self.cond.stop_gradient = True
            parent_block.append_op(
                type='less_than',
                inputs={'X': self.step_idx,
                        'Y': self.max_seq_len},
                outputs={'Out': self.cond})

        input_array = parent_block.create_var(
            name=unique_name('dynamic_rnn_input_array'),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype)
        self.input_array.append((input_array, x.dtype))
        parent_block.append_op(
            type='lod_tensor_to_array',
            inputs={'X': x,
                    'RankTable': self.lod_rank_table},
            outputs={'Out': input_array})
        return array_read(
            array=input_array, i=self.step_idx, **self.helper.to_kwargs)

    @contextlib.contextmanager
    def block(self):
        """Context manager defining the RNN step body; may be entered once.

        On exit of the while block it appends the loop bookkeeping ops
        (increment step_idx, write back memories, refresh the condition),
        then converts every recorded output array back to a LoD tensor.
        """
        if self.status != DynamicRNN.BEFORE_RNN:
            raise ValueError("rnn.block() can only be invoke once")
        self.step_idx = fill_constant(shape=[1], dtype='int64', value=0)
        self.step_idx.stop_gradient = False
        self.status = DynamicRNN.IN_RNN
        with self.while_op.block():
            yield
            increment(
                x=self.step_idx,
                value=1.0,
                in_place=True,
                **self.helper.to_kwargs)

            for new_mem, mem_array in self.mem_link:
                array_write(
                    x=new_mem,
                    i=self.step_idx,
                    array=mem_array,
                    **self.helper.to_kwargs)

            less_than(
                x=self.step_idx,
                y=self.max_seq_len,
                cond=self.cond,
                **self.helper.to_kwargs)

        self.status = DynamicRNN.AFTER_RNN
        for each_array in self.output_array:
            self.outputs.append(
                array_to_lod_tensor(
                    x=each_array,
                    table=self.lod_rank_table,
                    **self.helper.to_kwargs))

    def __call__(self, *args, **kwargs):
        """Return the RNN outputs (a single Variable, or a list of them)."""
        if self.status != DynamicRNN.AFTER_RNN:
            raise ValueError(
                "Dynamic RNN outputs can only be retrieved after rnn block")
        if len(self.outputs) == 1:
            return self.outputs[0]
        else:
            return self.outputs

    def memory(self, init=None, shape=None, value=0.0, dtype='float32'):
        """Create a recurrent memory, either from an explicit *init* Variable
        or as a constant-filled tensor shaped like the first step input.

        Returns the memory's value for the current step (read from the
        backing array and shrunk to the still-active batch size).
        """
        self._assert_in_rnn_block_('memory')
        if init is not None:
            if not isinstance(init, Variable):
                raise TypeError(
                    "The input arg `init` of memory() must be a Variable")
            parent_block = self._parent_block_()
            mem_array = parent_block.create_var(
                name=unique_name('dynamic_rnn_mem_array'),
                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                dtype=init.dtype)
            # Seed position 0 of the array with the initial value.
            parent_block.append_op(
                type='write_to_array',
                inputs={'X': init,
                        'I': self.zero_idx},
                outputs={'Out': mem_array})
            retv = array_read(
                array=mem_array, i=self.step_idx, **self.helper.to_kwargs)
            # Shrink to the number of sequences still active at this step.
            retv = shrink_memory(
                x=retv,
                i=self.step_idx,
                table=self.lod_rank_table,
                **self.helper.to_kwargs)
            self.mem_dict[retv.name] = mem_array
            return retv
        else:
            if len(self.input_array) == 0:
                raise ValueError(
                    "step_input should be invoked before memory(shape=..., value=...)"
                )
            parent_block = self._parent_block_()
            init = parent_block.create_var(
                name=unique_name('mem_init'), dtype=dtype)
            # Use step 0 of the first input to get the batch dimension.
            arr, dtype = self.input_array[0]
            in0 = parent_block.create_var(name=unique_name('in0'), dtype=dtype)
            parent_block.append_op(
                type='read_from_array',
                inputs={'X': [arr],
                        'I': [self.zero_idx]},
                outputs={'Out': [in0]})
            parent_block.append_op(
                type='fill_constant_batch_size_like',
                inputs={'Input': [in0]},
                outputs={'Out': [init]},
                attrs={
                    'shape': [-1] + shape,
                    'value': float(value),
                    'dtype': init.dtype
                })
            # Recurse into the init-variable branch above.
            return self.memory(init=init)

    def update_memory(self, ex_mem, new_mem):
        """Schedule *new_mem* to be written back as next step's value of the
        memory previously returned for *ex_mem*."""
        self._assert_in_rnn_block_('update_memory')
        if not isinstance(ex_mem, Variable):
            raise TypeError("The input arg `ex_mem` of update_memory() must "
                            "be a Variable")
        if not isinstance(new_mem, Variable):
            raise TypeError("The input arg `new_mem` of update_memory() must "
                            "be a Variable")

        mem_array = self.mem_dict.get(ex_mem.name, None)
        if mem_array is None:
            raise ValueError("Please invoke memory before update_memory")
        if self.lod_rank_table is None:
            raise ValueError("Please invoke step_input before update_memory")
        # Actual array_write happens at the end of block().
        self.mem_link.append((new_mem, mem_array))

    def output(self, *outputs):
        """Record per-step output variables; each is appended at step_idx to
        its own tensor array in the parent block."""
        self._assert_in_rnn_block_('output')
        parent_block = self._parent_block_()
        for each in outputs:
            outside_array = parent_block.create_var(
                name=unique_name("_".join(
                    [self.helper.name, "output_array", each.name])),
                type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                dtype=each.dtype)
            array_write(x=each, i=self.step_idx, array=outside_array)
            self.output_array.append(outside_array)

    def _parent_block_(self):
        """Return the block enclosing the current (while) block."""
        prog = self.helper.main_program
        parent_idx = prog.current_block().parent_idx
        assert parent_idx >= 0
        parent_block = prog.block(parent_idx)
        return parent_block

    def _assert_in_rnn_block_(self, method):
        """Raise unless the builder is currently inside rnn.block()."""
        if self.status != DynamicRNN.IN_RNN:
            raise ValueError("{0} can only be invoked inside rnn block.".format(
                method))
"""
Edge Examples
"""
__version__ = "$Revision: 1.13 $"
import sys, os
thisPath = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(thisPath,"..")))
from ExampleBuilders.ExampleBuilder import ExampleBuilder
from Core.IdSet import IdSet
import Core.ExampleUtils as ExampleUtils
from FeatureBuilders.MultiEdgeFeatureBuilder import MultiEdgeFeatureBuilder
from FeatureBuilders.TriggerFeatureBuilder import TriggerFeatureBuilder
#from FeatureBuilders.TokenFeatureBuilder import TokenFeatureBuilder
from Core.SimpleGraph import Graph
from Utils.ProgressCounter import ProgressCounter
import Utils.Libraries.combine as combine
import Utils.ElementTreeUtils as ETUtils
import gzip
import types
from collections import defaultdict
def combinations(iterable, r):
    """Yield r-length tuples of elements of *iterable*, in lexicographic
    index order and without repetition (pure-Python equivalent of
    itertools.combinations, kept local for old interpreters).

    combinations('ABCD', 2) --> AB AC AD BC BD CD
    combinations(range(4), 3) --> 012 013 023 123
    """
    pool = tuple(iterable)
    n = len(pool)
    if r > n:
        return
    # list() is required: indices is mutated in place below, and under
    # Python 3 range() returns an immutable sequence.
    indices = list(range(r))
    yield tuple(pool[i] for i in indices)
    while True:
        # Find the rightmost index that can still be advanced.
        for i in reversed(range(r)):
            if indices[i] != i + n - r:
                break
        else:
            return
        # Advance it, then reset every index to its right.
        indices[i] += 1
        for j in range(i + 1, r):
            indices[j] = indices[j - 1] + 1
        yield tuple(pool[i] for i in indices)
def compareInteractionPrecedence(e1, e2):
    """Old-style cmp function ordering interaction tuples.

    e1/e2 = (interaction, pathdist, lindist, tok2pos)

    Orders by dependency-path distance first, then linear token distance,
    then the position of entity 2's head token. Returns -1, 0 or 1, so it
    can be used directly as a cmp function (or wrapped with
    functools.cmp_to_key on Python 3).
    """
    # Tuple comparison is elementwise left-to-right, which reproduces the
    # original nested pathdist -> lindist -> tok2pos cascade exactly.
    a = e1[1:4]
    b = e2[1:4]
    return (a > b) - (a < b)
class UnmergingExampleBuilder(ExampleBuilder):
    """
    This example builder makes unmerging examples, i.e. examples describing
    potential events.

    NOTE(review): this module is Python 2 code (``print >>``, ``has_key``,
    ``types.StringTypes``); keep it on a Python 2 interpreter.
    """
    #def __init__(self, style="trigger_features:typed:directed:no_linear:entities:genia_limits:noMasking:maxFeatures", length=None, types=[], featureSet=None, classSet=None):
    def __init__(self, style=None, length=None, types=[], featureSet=None, classSet=None):
        # reset style regardless of input
        #style="trigger_features:typed:directed:no_linear:entities:genia_limits:noMasking:maxFeatures"
        if featureSet == None:
            featureSet = IdSet()
        if classSet == None:
            classSet = IdSet(1)
        else:
            classSet = classSet
        # Class id 1 must be the negative class; the rest of the pipeline
        # relies on this invariant.
        assert( classSet.getId("neg") == 1 )

        ExampleBuilder.__init__(self, classSet=classSet, featureSet=featureSet)

        # All style switches default to None (off) unless set in the style
        # string passed by the caller.
        defaultNone = ["binary", "trigger_features","typed","directed","no_linear","entities","genia_limits",
                       "noAnnType", "noMasking", "maxFeatures", "no_merge", "disable_entity_features",
                       "disable_single_element_features", "disable_ngram_features", "disable_path_edge_features"]
        defaultParameters = {}
        for name in defaultNone:
            defaultParameters[name] = None
        defaultParameters["keep_intersentence"] = False
        defaultParameters["keep_intersentence_gold"] = True
        defaultParameters["no_arg_count_upper_limit"] = False
        self.styles = self._setDefaultParameters(defaultParameters)
        self.styles = self.getParameters(style)
        self.multiEdgeFeatureBuilder = MultiEdgeFeatureBuilder(self.featureSet)
        self.multiEdgeFeatureBuilder.noAnnType = self.styles["noAnnType"]
        self.multiEdgeFeatureBuilder.maskNamedEntities = not self.styles["noMasking"]
        self.multiEdgeFeatureBuilder.maximum = self.styles["maxFeatures"]
        #self.tokenFeatureBuilder = TokenFeatureBuilder(self.featureSet)
        self.pathLengths = length
        assert(self.pathLengths == None)
        self.types = types
        self.triggerFeatureBuilder = TriggerFeatureBuilder(self.featureSet)
        self.triggerFeatureBuilder.useNonNameEntities = True
        #self.outFile = open("exampleTempFile.txt","wt")

    def getInteractionEdgeLengths(self, sentenceGraph, paths):
        """
        Return dependency and linear length of all interaction edges
        (measured between the two tokens).

        Returns a dict mapping each interaction element to a tuple
        (interaction, pathLength, linLength, t2Pos) suitable for
        compareInteractionPrecedence.
        """
        interactionLengths = {}
        count = 0
        for interaction in sentenceGraph.interactions:
            # Calculated interaction edge dep and lin length
            e1Id = interaction.get("e1")
            e2Id = interaction.get("e2")
            if e2Id not in sentenceGraph.entitiesById: # intersentence interaction
                # Use a unique negative sort key so intersentence edges keep
                # a stable order before all intra-sentence edges.
                interactionLengths[interaction] = (interaction, -count, -count, -count)
                continue
            e1 = sentenceGraph.entitiesById[e1Id]
            e2 = sentenceGraph.entitiesById[e2Id]
            t1 = sentenceGraph.entityHeadTokenByEntity[e1]
            t2 = sentenceGraph.entityHeadTokenByEntity[e2]
            # Get dep path length
            if t1 != t2:
                path = paths.getPaths(t1, t2)
            if t1 != t2 and len(path) > 0:
                pathLength = min(len(x) for x in path) #len(paths[t1][t2])
            else: # no dependencyPath
                pathLength = 999999 # more than any real path
            # Linear distance
            t1Pos = -1
            t2Pos = -1
            for i in range(len(sentenceGraph.tokens)):
                if sentenceGraph.tokens[i] == t1:
                    t1Pos = i
                    if t2Pos != -1:
                        break
                if sentenceGraph.tokens[i] == t2:
                    t2Pos = i
                    if t1Pos != -1:
                        break
            linLength = abs(t1Pos - t2Pos)
            interactionLengths[interaction] = (interaction, pathLength, linLength, t2Pos)
            count += 1
        return interactionLengths

    def eventIsGold(self, entity, arguments, sentenceGraph, goldGraph, goldEntitiesByOffset, allGoldInteractions):
        """Return True if (entity, arguments) exactly matches a gold event:
        same head offset and type, same per-type argument counts, and each
        argument matched by offset/type against a gold interaction."""
        offset = entity.get("headOffset")
        if not goldEntitiesByOffset.has_key(offset):
            return False
        eType = entity.get("type")
        goldEntities = goldEntitiesByOffset[offset]
        # Check all gold entities for a match
        for goldEntity in goldEntities:
            isGold = True
            # The entity type must match
            if goldEntity.get("type") != eType:
                isGold = False
                continue
            goldEntityId = goldEntity.get("id")
            # Collect the gold interactions
            goldInteractions = []
            for goldInteraction in allGoldInteractions: #goldGraph.interactions:
                if goldInteraction.get("e1") == goldEntityId and goldInteraction.get("event") == "True":
                    goldInteractions.append(goldInteraction)
            # Argument count rules
            if len(goldInteractions) != len(arguments): # total number of edges differs
                isGold = False
                continue
            # count number of edges per type
            argTypeCounts = {}
            for argument in arguments:
                argType = argument.get("type")
                if not argTypeCounts.has_key(argType): argTypeCounts[argType] = 0
                argTypeCounts[argType] += 1
            # count number of gold edges per type
            goldTypeCounts = {}
            for argument in goldInteractions:
                argType = argument.get("type")
                if not goldTypeCounts.has_key(argType): goldTypeCounts[argType] = 0
                goldTypeCounts[argType] += 1
            # argument edge counts per type must match
            if argTypeCounts != goldTypeCounts:
                isGold = False
                continue
            # Exact argument matching
            for argument in arguments: # check all edges
                e1 = argument.get("e1")
                e2 = argument.get("e2")
                if e2 not in sentenceGraph.entitiesById: # intersentence argument, assumed to be correct
                    found = True
                    continue
                e2Entity = sentenceGraph.entitiesById[e2]
                e2Offset = e2Entity.get("headOffset")
                e2Type = e2Entity.get("type")
                argType = argument.get("type")
                found = False
                for goldInteraction in goldInteractions:
                    if goldInteraction.get("type") == argType:
                        if goldInteraction.get("e2") in goldGraph.entitiesById: # if not, assume this goldInteraction is an intersentence interaction
                            goldE2Entity = goldGraph.entitiesById[goldInteraction.get("e2")]
                            if goldE2Entity.get("headOffset") == e2Offset and goldE2Entity.get("type") == e2Type:
                                found = True
                                break
                if found == False: # this edge did not have a corresponding gold edge
                    isGold = False
                    break
            # Event is in gold
            if isGold:
                break
        return isGold

    def sortInteractionsById(self, interactions):
        # The order of the interactions affects the order of the unmerging examples, and this
        # affects performance. It's not clear whether this is what really happens, or whether
        # the order of the interactions has some effect on the consistency of the unmerging
        # features (it shouldn't). However, in case it does, this function is left here for now,
        # although it shouldn't be needed at all. In any case the impact is minimal, for GE
        # 53.22 vs 53.28 on the development set.
        pairs = []
        for interaction in interactions:
            # Sort numerically by the trailing ".iN" suffix of the id.
            pairs.append( (int(interaction.get("id").split(".i")[-1]), interaction) )
        pairs.sort()
        return [x[1] for x in pairs]

    def processDocument(self, sentences, goldSentences, outfile, structureAnalyzer=None):
        """Build unmerging examples for every sentence of one document,
        pairing each sentence with its gold counterpart when available."""
        #calculatePredictedRange(self, sentences)
        # Index all entities of the document so event-validity checks can
        # look up cross-sentence arguments.
        self.documentEntitiesById = {}
        for sentence in sentences:
            for entity in sentence.entities:
                assert entity.get("id") not in self.documentEntitiesById
                self.documentEntitiesById[entity.get("id")] = entity

        for i in range(len(sentences)):
            sentence = sentences[i]
            goldSentence = None
            if goldSentences != None:
                goldSentence = goldSentences[i]
            self.progress.update(1, "Building examples ("+sentence.sentence.get("id")+"): ")
            self.processSentence(sentence, outfile, goldSentence, structureAnalyzer=structureAnalyzer)

    def buildExamplesFromGraph(self, sentenceGraph, outfile, goldGraph=None, structureAnalyzer=None):
        """
        Build examples for a single sentence. Returns a list of examples.
        See Core/ExampleUtils for example format.
        """
        self.multiEdgeFeatureBuilder.setFeatureVector(resetCache=True)
        self.triggerFeatureBuilder.initSentence(sentenceGraph)

        exampleIndex = 0
        undirected = sentenceGraph.dependencyGraph.toUndirected()
        paths = undirected

        # Get argument order
        self.interactionLenghts = self.getInteractionEdgeLengths(sentenceGraph, paths)

        # Map tokens to character offsets
        tokenByOffset = {}
        for i in range(len(sentenceGraph.tokens)):
            token = sentenceGraph.tokens[i]
            if goldGraph != None: # check that the tokenizations match
                goldToken = goldGraph.tokens[i]
                assert token.get("id") == goldToken.get("id") and token.get("charOffset") == goldToken.get("charOffset")
            tokenByOffset[token.get("charOffset")] = token.get("id")

        # Map gold entities to their head offsets
        goldEntitiesByOffset = {}
        if goldGraph != None:
            for entity in goldGraph.entities:
                offset = entity.get("headOffset")
                assert offset != None
                if not goldEntitiesByOffset.has_key(offset):
                    goldEntitiesByOffset[offset] = []
                goldEntitiesByOffset[offset].append(entity)

        if self.styles["no_merge"]:
            mergeInput = False
            entities = sentenceGraph.entities
        else:
            # Merge duplicate entities sharing a head token before unmerging.
            mergeInput = True
            sentenceGraph.mergeInteractionGraph(True)
            entities = sentenceGraph.mergedEntities
            self.exampleStats.addValue("Duplicate entities skipped", len(sentenceGraph.entities) - len(entities))

        exampleIndex = 0
        for entity in entities: # sentenceGraph.entities:
            if type(entity) in types.StringTypes: # dummy entity for intersentence interactions
                continue

            eType = entity.get("type")
            assert eType != None, entity.attrib
            eType = str(eType)

            interactions = [x[2] for x in sentenceGraph.getOutInteractions(entity, mergeInput)]
            interactions = self.sortInteractionsById(interactions)
            interactionCounts = defaultdict(int)
            validInteractionsByType = defaultdict(list)
            for interaction in interactions:
                if interaction.get("event") != "True":
                    continue
                e1 = sentenceGraph.entitiesById[interaction.get("e1")]
                if interaction.get("e2") in sentenceGraph.entitiesById:
                    e2 = sentenceGraph.entitiesById[interaction.get("e2")]
                    if interaction.get("type") in structureAnalyzer.getValidEdgeTypes(e1.get("type"), e2.get("type")):
                        validInteractionsByType[interaction.get("type")].append(interaction)
                else: # intersentence
                    validInteractionsByType[interaction.get("type")].append(interaction)
                interactionCounts[interaction.get("type")] += 1
            interactionCountString = ",".join([key + "=" + str(interactionCounts[key]) for key in sorted(interactionCounts.keys())])
            #argCombinations = self.getArgumentCombinations(eType, interactions, entity.get("id"))
            intCombinations = []
            validIntTypeCount = 0
            maxArgCount = 0
            if self.debug:
                print >> sys.stderr, entity.get("id"), entity.get("type"), "int:" + interactionCountString, "validInt:" + str(validInteractionsByType)
            for intType in sorted(validInteractionsByType.keys()): # for each argument type the event can have
                validIntTypeCount += 1
                intCombinations.append([])
                minArgs, maxArgs = structureAnalyzer.getArgLimits(entity.get("type"), intType)
                if maxArgs > maxArgCount:
                    maxArgCount = maxArgs
                #if maxArgs > 1: # allow any number of arguments for cases like Binding
                #    maxArgs = len(validInteractionsByType[intType])
                for combLen in range(minArgs, maxArgs+1): # for each valid argument count, get all possible combinations. note that there may be zero-lenght combination
                    for singleTypeArgCombination in combinations(validInteractionsByType[intType], combLen):
                        intCombinations[-1].append(singleTypeArgCombination)
                # e.g. theme:[a,b], cause:[d] = [[
            # intCombinations now contains a list of lists, each of which has a tuple for each valid combination
            # of one argument type. Next, we'll make all valid combinations of multiple argument types
            if self.debug:
                print >> sys.stderr, " ", "intCombinations", intCombinations
            argCombinations = combine.combine(*intCombinations)
            if self.debug:
                print >> sys.stderr, " ", "argCombinations", argCombinations
            for i in range(len(argCombinations)):
                argCombinations[i] = sum(argCombinations[i], ())
            #sum(argCombinations, []) # flatten nested list
            if self.debug:
                print >> sys.stderr, " ", "argCombinations flat", argCombinations

            for argCombination in argCombinations:
                # Originally binary classification
                if goldGraph != None:
                    isGoldEvent = self.eventIsGold(entity, argCombination, sentenceGraph, goldGraph, goldEntitiesByOffset, goldGraph.interactions)
                    #if eType == "Binding":
                    #    print argCombination[0].get("e1"), len(argCombination), isGoldEvent
                else:
                    isGoldEvent = False
                # Named (multi-)class
                if isGoldEvent:
                    # category = "zeroArg"
                    # if validIntTypeCount == 1:
                    #     category = "singleArg" # event has 0-1 arguments (old simple6)
                    # if validIntTypeCount > 1:
                    #     category = "multiType" # event has arguments of several types, 0-1 of each (old Regulation)
                    # if maxArgCount > 1:
                    #     category = "multiArg" # event can have 2-n of at least one argument type (old Binding)
                    if self.styles["binary"]:
                        category = "pos"
                    else:
                        category = entity.get("type")
                    assert category != None
                else:
                    category = "neg"
                self.exampleStats.beginExample(category)

                issues = defaultdict(int)
                # early out for proteins etc.
                if validIntTypeCount == 0 and entity.get("given") == "True":
                    self.exampleStats.filter("given-leaf:" + entity.get("type"))
                    if self.debug:
                        print >> sys.stderr, " ", category +"("+eType+")", "arg combination", argCombination, "LEAF"
                elif structureAnalyzer.isValidEntity(entity) or structureAnalyzer.isValidEvent(entity, argCombination, self.documentEntitiesById, noUpperLimitBeyondOne=self.styles["no_arg_count_upper_limit"], issues=issues):
                    if self.debug:
                        print >> sys.stderr, " ", category, "arg combination", argCombination, "VALID"
                    argString = ""
                    for arg in argCombination:
                        argString += "," + arg.get("type") + "=" + arg.get("id")
                    extra = {"xtype":"um","e":entity.get("id"),"i":argString[1:],"etype":eType,"class":category}
                    extra["allInt"] = interactionCountString
                    assert type(extra["etype"]) in types.StringTypes, extra
                    assert type(extra["class"]) in types.StringTypes, category
                    assert type(extra["i"]) in types.StringTypes, argString
                    example = self.buildExample(sentenceGraph, paths, entity, argCombination, interactions)
                    example[0] = sentenceGraph.getSentenceId()+".x"+str(exampleIndex)
                    example[1] = self.classSet.getId(category)
                    example[3] = extra
                    #examples.append( example )
                    ExampleUtils.appendExamples([example], outfile)
                    exampleIndex += 1
                else: # not a valid event or valid entity
                    if len(issues) == 0: # must be > 0 so that it gets filtered
                        if not structureAnalyzer.isValidEntity(entity):
                            issues["INVALID_ENTITY:"+eType] += 1
                        else:
                            issues["UNKNOWN_ISSUE_FOR:"+eType] += 1
                    for key in issues:
                        self.exampleStats.filter(key)
                    if self.debug:
                        print >> sys.stderr, " ", category, "arg combination", argCombination, "INVALID", issues
                self.exampleStats.endExample()

        #return examples
        return exampleIndex

    def buildExample(self, sentenceGraph, paths, eventEntity, argCombination, allInteractions): #themeEntities, causeEntities=None):
        """Build the feature vector for one candidate event (eventEntity with
        the argument edges in argCombination, in the context of all of the
        entity's outgoing interactions). Returns [id, class, features, extra]
        with id/class/extra left as None for the caller to fill in."""
        # NOTE!!!! TODO
        # add also features for arguments present, but not in this combination
        features = {}
        self.features = features

        self.buildInterArgumentBagOfWords(argCombination, sentenceGraph)

        eventEntityType = eventEntity.get("type")
        if eventEntityType == "Binding":
            # For Binding events, rank all interaction edges by precedence so
            # argument features can be indexed by rank.
            interactionIndex = {}
            groupInteractionLengths = []
            for interaction in allInteractions:
                groupInteractionLengths.append(self.interactionLenghts[interaction])
            groupInteractionLengths.sort(compareInteractionPrecedence)
            #print groupInteractionLengths
            for i in range(len(groupInteractionLengths)):
                interactionIndex[groupInteractionLengths[i][0]] = i

        eventToken = sentenceGraph.entityHeadTokenByEntity[eventEntity]
        self.triggerFeatureBuilder.setFeatureVector(self.features)
        self.triggerFeatureBuilder.tag = "trg_"
        self.triggerFeatureBuilder.buildFeatures(eventToken)
        self.triggerFeatureBuilder.tag = None

        #self.setFeature("rootType_"+eventEntity.get("type"), 1)

        argThemeCount = 0
        argCauseCount = 0
        argCounts = {}
        # Current example's edge combination
        for arg in argCombination:
            if arg.get("type") == "Theme":
                argThemeCount += 1
                tag = "argTheme"
                self.buildArgumentFeatures(sentenceGraph, paths, features, eventToken, arg, tag)
                if eventEntityType == "Binding":
                    tag += str(interactionIndex[arg])
                    self.buildArgumentFeatures(sentenceGraph, paths, features, eventToken, arg, tag)
            elif arg.get("type") == "Cause": # Cause
                argCauseCount += 1
                self.buildArgumentFeatures(sentenceGraph, paths, features, eventToken, arg, "argCause")
            else:
                argType = arg.get("type")
                if argType not in argCounts: argCounts[argType] = 0
                self.buildArgumentFeatures(sentenceGraph, paths, features, eventToken, arg, "arg"+argType)
                argCounts[argType] += 1

        # Edge group context
        contextThemeCount = 0
        contextCauseCount = 0
        for interaction in allInteractions:
            if interaction in argCombination: # Already part of current example's combination
                continue
            if interaction.get("type") == "Theme":
                contextThemeCount += 1
                tag = "conTheme"
                self.buildArgumentFeatures(sentenceGraph, paths, features, eventToken, interaction, tag)
                if eventEntityType == "Binding":
                    tag += str(interactionIndex[interaction])
                    self.buildArgumentFeatures(sentenceGraph, paths, features, eventToken, interaction, tag)
            else: # Cause
                contextCauseCount += 1
                self.buildArgumentFeatures(sentenceGraph, paths, features, eventToken, interaction, "conCause")

        self.setFeature("argCount", len(argCombination))
        self.setFeature("argCount_" + str(len(argCombination)), 1)
        self.setFeature("interactionCount", len(allInteractions))
        self.setFeature("interactionCount_" + str(len(allInteractions)), 1)

        self.setFeature("argThemeCount", argThemeCount)
        self.setFeature("argThemeCount_" + str(argThemeCount), 1)
        self.setFeature("argCauseCount", argCauseCount)
        self.setFeature("argCauseCount_" + str(argCauseCount), 1)
        for key in sorted(argCounts.keys()):
            self.setFeature("arg" + key + "Count", argCounts[key])
            self.setFeature("arg" + key + "Count_" + str(argCounts[key]), 1)

        self.setFeature("interactionThemeCount", contextThemeCount)
        self.setFeature("interactionThemeCount_" + str(contextThemeCount), 1)
        self.setFeature("interactionCauseCount", contextCauseCount)
        self.setFeature("interactionCauseCount_" + str(contextCauseCount), 1)

        self.triggerFeatureBuilder.tag = ""
        self.triggerFeatureBuilder.setFeatureVector(None)

        # Common features
        # if e1Type.find("egulation") != -1: # leave r out to avoid problems with capitalization
        #     if entity2.get("given") == "True":
        #         features[self.featureSet.getId("GENIA_regulation_of_protein")] = 1
        #     else:
        #         features[self.featureSet.getId("GENIA_regulation_of_event")] = 1

        # define extra attributes
        return [None,None,features,None]

    def buildArgumentFeatures(self, sentenceGraph, paths, features, eventToken, arg, tag):
        """Add edge and trigger features for one argument interaction *arg*,
        prefixed with *tag*. Intersentence arguments are skipped."""
        if arg.get("e2") not in sentenceGraph.entitiesById: # intersentence argument
            return
        argEntity = sentenceGraph.entitiesById[arg.get("e2")]
        argToken = sentenceGraph.entityHeadTokenByEntity[argEntity]
        self.buildEdgeFeatures(sentenceGraph, paths, features, eventToken, argToken, tag)
        self.triggerFeatureBuilder.tag = tag + "trg_"
        self.triggerFeatureBuilder.buildFeatures(argToken)
        if argEntity.get("given") == "True":
            self.setFeature(tag+"Protein", 1)
        else:
            self.setFeature(tag+"Event", 1)
            self.setFeature("nestingEvent", 1)
        self.setFeature(tag+"_"+argEntity.get("type"), 1)

    def buildEdgeFeatures(self, sentenceGraph, paths, features, eventToken, argToken, tag):
        """Add multi-edge features over the shortest dependency path between
        the event head token and the argument head token, prefixed with *tag*."""
        #eventToken = sentenceGraph.entityHeadTokenByEntity[eventNode]
        #argToken = sentenceGraph.entityHeadTokenByEntity[argNode]
        self.multiEdgeFeatureBuilder.tag = tag + "_"
        self.multiEdgeFeatureBuilder.setFeatureVector(features, None, None, False)

        self.setFeature(tag+"_present", 1)

        path = paths.getPaths(eventToken, argToken)
        if eventToken != argToken and len(path) > 0:
            path = path[0]
        else:
            # No dependency path: fall back to the trivial two-token path.
            path = [eventToken, argToken]

        #edges = None
        if not self.styles["disable_entity_features"]:
            self.multiEdgeFeatureBuilder.buildEntityFeatures(sentenceGraph)
        self.multiEdgeFeatureBuilder.buildPathLengthFeatures(path)
        #if not "disable_terminus_features" in self.styles:
        #    self.multiEdgeFeatureBuilder.buildTerminusTokenFeatures(path, sentenceGraph) # remove for fast
        if not self.styles["disable_single_element_features"]:
            self.multiEdgeFeatureBuilder.buildSingleElementFeatures(path, sentenceGraph)
        if not self.styles["disable_ngram_features"]:
            self.multiEdgeFeatureBuilder.buildPathGrams(2, path, sentenceGraph) # remove for fast
            self.multiEdgeFeatureBuilder.buildPathGrams(3, path, sentenceGraph) # remove for fast
            self.multiEdgeFeatureBuilder.buildPathGrams(4, path, sentenceGraph) # remove for fast
        if not self.styles["disable_path_edge_features"]:
            self.multiEdgeFeatureBuilder.buildPathEdgeFeatures(path, sentenceGraph)
        #self.multiEdgeFeatureBuilder.buildSentenceFeatures(sentenceGraph)
        self.multiEdgeFeatureBuilder.setFeatureVector(None, None, None, False)
        self.multiEdgeFeatureBuilder.tag = ""

    def buildInterArgumentBagOfWords(self, arguments, sentenceGraph):
        """Add bag-of-words features for the tokens lying strictly between
        the leftmost and rightmost argument head tokens (entity heads and
        named-entity tokens excluded). No-op for fewer than two arguments."""
        if len(arguments) < 2:
            return

        indexByToken = {}
        for i in range(len(sentenceGraph.tokens)):
            indexByToken[sentenceGraph.tokens[i]] = i

        argTokenIndices = set()
        for arg in arguments:
            if arg.get("e2") in sentenceGraph.entitiesById: # skip intersentence interactions
                argEntity = sentenceGraph.entitiesById[arg.get("e2")]
                argToken = sentenceGraph.entityHeadTokenByEntity[argEntity]
                argTokenIndices.add(indexByToken[argToken])
        if len(argTokenIndices) < 1:
            return
        minIndex = min(argTokenIndices)
        maxIndex = max(argTokenIndices)
        self.setFeature("argBoWRange", (maxIndex-minIndex))
        self.setFeature("argBoWRange_" + str(maxIndex-minIndex), 1)
        bow = set()
        for i in range(minIndex+1, maxIndex):
            token = sentenceGraph.tokens[i]
            if len(sentenceGraph.tokenIsEntityHead[token]) == 0 and not sentenceGraph.tokenIsName[token]:
                bow.add(token.get("text"))
        bow = sorted(list(bow))
        for word in bow:
            self.setFeature("argBoW_"+word, 1)
            if word in ["/", "-"]:
                self.setFeature("argBoW_slashOrHyphen", 1)
        if len(bow) == 1:
            self.setFeature("argBoWonly_"+bow[0], 1)
            if bow[0] in ["/", "-"]:
                self.setFeature("argBoWonly_slashOrHyphen", 1)
# components/album-service/src/add_subscription_to_album_task_processor.py
'''
add_subscription_to_album_task_processor.py
Processes tasks to add subscriptions to albums
Author: <NAME>
eMail: <EMAIL>
GPG-Key-ID: <KEY>
GPG-Fingerprint: A757 5741 FD1E 63E8 357D 48E2 3C68 AE70 B2F8 AA17
License: MIT License
'''
import threading
import redis
import logging
import json
import sqlite3
import config
class AddSubscriptionToAlbumTaskProcessor(threading.Thread):
    """Worker thread that consumes 'add-subscription-to-album' tasks from a
    Redis list and records each subscription in the SQLite database."""

    def __init__(self):
        threading.Thread.__init__(self)
        # Blocking Redis client used for both task intake and acknowledgement.
        self.myRedis = redis.Redis(host='redis', port=6379, db=0)
        logging.basicConfig(level=logging.DEBUG)

    def run(self):
        """Process tasks forever; each task is JSON with 'album-id' and
        'subscription-id' keys."""
        while True:
            logging.info("Waiting for next add-subscription-to-album task.")
            # Atomically move the task from the pending list to the
            # processing list so it is not lost if this worker dies mid-way.
            task = self.myRedis.brpoplpush('add-subscription-to-album',
                                           'add-subscription-to-album-processing')
            metadata = json.loads(task)
            logging.info(metadata)
            logging.info("Task found, processing...")
            ## Add subscription to album in DB
            with sqlite3.connect(config.DB_STRING) as conn:
                conn.execute("INSERT INTO album_subscriptions VALUES (?, ?)",
                             [metadata['album-id'], metadata['subscription-id']])
            ## If successful, remove task from processing list
            logging.info("Removing task from processing list")
            self.myRedis.lrem('add-subscription-to-album-processing', 0, task)
import logging
from collections import namedtuple
logger = logging.getLogger("pybinsim.Pose")
class Orientation(namedtuple('Orientation', ['yaw', 'pitch', 'roll'])):
    """Immutable orientation triple in (yaw, pitch, roll) order."""
    pass
class Position(namedtuple('Position', ['x', 'y', 'z'])):
    """Immutable position triple in (x, y, z) order."""
    pass
class Custom(namedtuple('CustomValues', ['a', 'b', 'c'])):
    """Immutable triple of free-form custom values (a, b, c)."""
    pass
class Pose:
    """A listener pose: orientation, position and three custom values."""

    def __init__(self, orientation, position, custom=Custom(0, 0, 0)):
        self.orientation = orientation
        self.position = position
        self.custom = custom

    def create_key(self):
        """Serialize the nine pose values into a comma-separated string key."""
        values = list(self.orientation) + list(self.position) + list(self.custom)
        return ','.join(str(value) for value in values)

    @staticmethod
    def from_filterValueList(filter_value_list):
        """Build a Pose from a flat value list.

        Accepts the 'old' 6-value format (orientation + position) or the
        'new' 9-value format (orientation + position + custom); any other
        length raises RuntimeError.
        """
        n = len(filter_value_list)
        if n not in (6, 9):
            raise RuntimeError("Unable to parse filter list: {}".format(filter_value_list))
        orientation = Orientation(*filter_value_list[0:3])
        position = Position(*filter_value_list[3:6])
        if n == 6:
            # 'old' format: orientation - position
            return Pose(orientation, position)
        # 'new' format: orientation - position - custom
        custom = Custom(*filter_value_list[6:9])
        return Pose(orientation, position, custom)
import unittest
from kubragen import KubraGen
from kubragen.jsonpatch import FilterJSONPatches_Apply, ObjectFilter, FilterJSONPatch
from kubragen.provider import Provider_Generic
from kg_kubestatemetrics import KubeStateMetricsBuilder, KubeStateMetricsOptions
class TestBuilder(unittest.TestCase):
    """Tests for KubeStateMetricsBuilder naming and generated manifests."""
    def setUp(self):
        # Fresh KubraGen instance with a generic provider for every test.
        self.kg = KubraGen(provider=Provider_Generic())
    def test_empty(self):
        # Without options, object names fall back to 'kube-state-metrics'.
        kms_config = KubeStateMetricsBuilder(kubragen=self.kg)
        self.assertEqual(kms_config.object_name('service'), 'kube-state-metrics')
        self.assertEqual(kms_config.object_name('deployment'), 'kube-state-metrics')
    def test_basedata(self):
        # Custom namespace/basename must flow into the object names and the
        # built service manifest; the 'check' patches assert on metadata.
        kms_config = KubeStateMetricsBuilder(kubragen=self.kg, options=KubeStateMetricsOptions({
            'namespace': 'myns',
            'basename': 'mykms',
        }))
        self.assertEqual(kms_config.object_name('service'), 'mykms')
        self.assertEqual(kms_config.object_name('deployment'), 'mykms')
        FilterJSONPatches_Apply(items=kms_config.build(kms_config.BUILD_SERVICE), jsonpatches=[
            FilterJSONPatch(filters=ObjectFilter(names=[kms_config.BUILDITEM_SERVICE]), patches=[
                {'op': 'check', 'path': '/metadata/name', 'cmp': 'equals', 'value': 'mykms'},
                {'op': 'check', 'path': '/metadata/namespace', 'cmp': 'equals', 'value': 'myns'},
            ]),
        ])
| StarcoderdataPython |
64279 | <reponame>cmlohr/small-python-projects
print("#######################################")
print("## AVERAGE STUDENT HEIGHT CALCULATOR ##")
print("#######################################")
#test heights: 123 149 175 183 166 179 125
# Read a whitespace-separated list of heights from the user.
student_heights = input(" Input a list of student heights:\n>> ").split()
# Convert each entry in place from str to int.
for n in range(0, len(student_heights)):
    student_heights[n] = int(student_heights[n])
#print(student_heights)
# Manual sum of all heights.
total = 0
for num in student_heights:
    total += num
#print(total)
def list_height(student_heights):
    """Return the number of entries in student_heights.

    The original counted elements with a manual loop; the built-in
    len() does the same thing. Kept as a function for compatibility
    with the existing call site.
    """
    return len(student_heights)
length = list_height(student_heights)
#print(length)
# Mean height, rounded to the nearest integer.
ave = round(total / length)
print(f"Average student height is: {ave}")
108445 | <filename>demo/views.py
from django.db.models import Q
from rest_framework import decorators
from rest_framework.generics import CreateAPIView, ListAPIView
from rest_framework.mixins import DestroyModelMixin, ListModelMixin
from rest_framework.permissions import IsAdminUser
from devathon.viewsets import ActionModelViewSet, ActionViewSet
from .models import Demo, DemoImage, Comment, Emoji, TechStackTag
from .serializers import (
CommentCreateSerializer,
DemoCreateSerializer,
DemoImageSerializer,
DemoSerializer,
CommentSerializer,
EmojiSerializer,
TechStackTagSerializer,
)
from .filters import DemoFilter, EmojiFilter
from .permissions import (
IsEmojiWriter,
IsImageOfMyDemo,
IsDemoTeamLeader,
IsCommentWriter,
)
class DemoViewSet(ActionModelViewSet):
    """CRUD plus an `emoji` action for Demo objects.

    Visibility: authenticated users see public demos plus their own
    team's demos; anonymous users see only public (show=True) demos.
    """
    filterset_class = DemoFilter
    queryset = Demo.objects.all()
    serializer_class = DemoSerializer
    # create uses a dedicated write-oriented serializer.
    serializer_classes = {
        "create": DemoCreateSerializer,
    }
    # Per-action permissions; an empty list means publicly accessible.
    # NOTE(review): "retreive" looks like a typo for "retrieve", so that
    # entry may never match — confirm the key names ActionModelViewSet uses.
    action_permission_classes = {
        "list": [],
        "retreive": [],
        "partial_update": [IsDemoTeamLeader],
        "update": [IsDemoTeamLeader],
        "destroy": [IsDemoTeamLeader | IsAdminUser],
        "emoji": [],
    }
    def get_queryset(self):
        # Team members may also see their own hidden demos; distinct()
        # avoids duplicates from the team__users join.
        if self.request.user.is_authenticated:
            return Demo.objects.filter(
                Q(show=True) | Q(team__users=self.request.user)
            ).distinct()
        else:
            return Demo.objects.filter(show=True)
    @decorators.action(detail=True, methods=["POST"])
    def emoji(self, request, *args, **kwargs):
        """Attach an emoji of the posted 'typ' to this demo for the caller."""
        typ = request.data.get("typ", None)
        demo = self.get_object()
        demo.leave_emoji(request.user, typ)
        return self.retrieve(request, *args, **kwargs)
class DemoImageView(CreateAPIView):
    """Upload an image for a demo; permitted only for the demo's own team."""
    queryset = DemoImage.objects.all()
    serializer_class = DemoImageSerializer
    permission_classes = [IsImageOfMyDemo]
    lookup_field = "id"
class CommentViewSet(ActionModelViewSet):
    """CRUD for comments plus like/dislike actions.

    Listing and voting are public; editing and deleting require the
    comment's writer.
    """
    queryset = Comment.objects.all()
    serializer_class = CommentSerializer
    # create uses a dedicated write serializer.
    serializer_classes = {
        "create": CommentCreateSerializer,
    }
    # NOTE(review): "retreive" looks like a typo for "retrieve" — confirm
    # the key names ActionModelViewSet expects.
    action_permission_classes = {
        "list": [],
        "retreive": [],
        "partial_update": [IsCommentWriter],
        "update": [IsCommentWriter],
        "destroy": [IsCommentWriter],
        "like": [],
        "dislike": [],
    }
    @decorators.action(detail=True, methods=["POST"])
    def like(self, request, *args, **kwargs):
        """Record a like by the requesting user, then return the comment."""
        comment = self.get_object()
        comment.like(request.user)
        return self.retrieve(request, *args, **kwargs)
    @decorators.action(detail=True, methods=["POST"])
    def dislike(self, request, *args, **kwargs):
        """Record a dislike by the requesting user, then return the comment."""
        comment = self.get_object()
        comment.dislike(request.user)
        return self.retrieve(request, *args, **kwargs)
class EmojiViewSet(ActionViewSet, ListModelMixin, DestroyModelMixin):
    """List and delete emojis, scoped to the requesting user's own."""
    queryset = Emoji.objects.all()
    filterset_class = EmojiFilter
    serializer_class = EmojiSerializer
    action_permission_classes = {
        "list": [],
        "destroy": [IsEmojiWriter],
    }
    def get_queryset(self):
        # Only the requester's own emojis are ever visible/deletable.
        return self.queryset.filter(writer=self.request.user)
class TechStackTagView(ListAPIView):
    """Public read-only list of all tech-stack tags."""
    queryset = TechStackTag.objects.all()
    serializer_class = TechStackTagSerializer
    permission_classes = []
| StarcoderdataPython |
4829098 | import random
from PIL import Image, ImageDraw
import numpy as np
import csv
import math
def ReadKeys(image: Image) -> list:
    """Input an image and its associated SIFT keypoints.

    The argument image is the image file name (without an extension).
    The image is read from the PGM format file image.pgm and the
    keypoints are read from the file image.key.

    ReadKeys returns the following 3 arguments:

    image: the image (in PIL 'RGB' format)

    keypoints: K-by-4 array, in which each row has the 4 values specifying
    a keypoint (row, column, scale, orientation).  The orientation
    is in the range [-PI, PI] radians.

    descriptors: a K-by-128 array, where each row gives a descriptor
    for one of the K keypoints.  The descriptor is a 1D array of 128
    values with unit length.
    """
    im = Image.open(image + '.pgm').convert('RGB')
    keypoints = []
    descriptors = []
    first = True
    with open(image + '.key', 'r') as f:
        reader = csv.reader(f, delimiter=' ', quoting=csv.QUOTE_NONNUMERIC, skipinitialspace=True)
        descriptor = []
        for row in reader:
            # 2-value row: the file header "<count> 128".
            if len(row) == 2:
                assert first, "Invalid keypoint file header."
                assert row[1] == 128, "Invalid keypoint descriptor length in header (should be 128)."
                count = row[0]
                first = False
            # 4-value row: one keypoint (row, column, scale, orientation).
            if len(row) == 4:
                keypoints.append(np.array(row))
            # 20- and 8-value rows: chunks of the current 128-value
            # descriptor (6 rows of 20 then 1 row of 8 = 128 values).
            if len(row) == 20:
                descriptor += row
            if len(row) == 8:
                descriptor += row
                assert len(descriptor) == 128, "Keypoint descriptor length invalid (should be 128)."
                # normalize the key to unit length
                descriptor = np.array(descriptor)
                descriptor = descriptor / math.sqrt(np.sum(np.power(descriptor, 2)))
                descriptors.append(descriptor)
                descriptor = []
    assert len(keypoints) == count, "Incorrect total number of keypoints read."
    print("Number of keypoints read:", int(count))
    return [im, keypoints, descriptors]
def AppendImages(im1, im2):
    """Return a new RGB image with im1 and im2 placed side by side.

    The result is as wide as both inputs combined and as tall as the
    taller of the two. Both arguments are PIL images of type RGB.
    """
    width1, height1 = im1.size
    width2, height2 = im2.size
    combined = Image.new('RGB', (width1 + width2, max(height1, height2)))
    combined.paste(im1, (0, 0))
    combined.paste(im2, (width1, 0))
    return combined
def DisplayMatches(im1, im2, matched_pairs):
    """Display matches on a new image with the two input images placed side by side.

    Arguments:
     im1           1st image (in PIL 'RGB' format)
     im2           2nd image (in PIL 'RGB' format)
     matched_pairs list of matching keypoints, im1 to im2

    Displays and returns a newly created image (in PIL 'RGB' format)
    """
    im3 = AppendImages(im1, im2)
    offset = im1.size[0]
    draw = ImageDraw.Draw(im3)
    # Keypoints are (row, column, ...); draw.line takes (x, y), hence the
    # column/row swap. The second endpoint is shifted right by im1's width.
    for match in matched_pairs:
        draw.line((match[0][1], match[0][0], offset + match[1][1], match[1][0]), fill="red", width=2)
    # im3.show()
    return im3
def isInOrientThreshold(theta1: float, theta2: float, threshold: float) -> bool:
    """True when the wrap-around angular difference theta2 - theta1 lies
    within [-threshold, threshold], with a small epsilon tolerance."""
    EPSILON = 1E-6
    # Map the raw difference into the interval [-pi, pi).
    delta = ((theta2 - theta1 + (3 * math.pi)) % (2 * math.pi)) - math.pi
    limit = threshold + EPSILON
    return -limit <= delta <= limit


# keypoint (row, column, scale, orientation). The orientation
# is in the range [-PI, PI] radians.
def isConsistent(match1: list, match2: list, thresOrient: float, thresScale: float) -> bool:
    """True when two keypoint matches agree both in orientation change
    (within thresOrient) and in scale change (within the thresScale band)."""
    orient_delta_1 = (match1[0][3] - match1[1][3]) % (2 * math.pi)
    orient_delta_2 = (match2[0][3] - match2[1][3]) % (2 * math.pi)
    scale_delta_1 = match1[0][2] - match1[1][2]
    scale_delta_2 = match2[0][2] - match2[1][2]
    if not isInOrientThreshold(orient_delta_1, orient_delta_2, thresOrient):
        return False
    return thresScale * scale_delta_2 <= scale_delta_1 <= (1 / thresScale) * scale_delta_2
def match(image1: Image, image2: Image, siftThreshold: float, useRansac: bool = True,
          ransacThresOrient: float = math.pi / 6, ransacThresScale: float = 0.5) -> Image:
    """Match SIFT keypoints of image1 against image2 and return a
    side-by-side visualization of the matches.

    image1/image2 are file-name stems: "<name>.pgm" and "<name>.key" are
    loaded via ReadKeys. A pair is kept when the ratio of best to
    second-best descriptor angle is below siftThreshold. With useRansac,
    10 random trials each keep the subset of matches consistent (in
    orientation and scale change) with one randomly chosen match, and the
    largest subset is displayed.

    Bug fix: the trial sets were created with ``[[]] * 10``, which makes
    all ten entries reference the SAME list — every trial appended to one
    shared set, so the "largest consistent subset" selection was
    meaningless. Each trial now gets an independent list.
    """
    im1, keypoints1, descriptors1 = ReadKeys(image1)
    im2, keypoints2, descriptors2 = ReadKeys(image2)

    matched_pairs = []
    # Angle between unit-length descriptors: arccos of the dot product.
    mat = np.arccos(np.dot(descriptors1, np.transpose(descriptors2)))
    for img1Idx, row in enumerate(mat):
        sortedRowIndexes = np.argsort(row)
        denom = max(row[sortedRowIndexes[1]], 1E-6)  # avoid division by 0
        # Ratio test: best match must be clearly better than the runner-up.
        if (row[sortedRowIndexes[0]] / denom) < siftThreshold:
            matched_pairs.append([keypoints1[img1Idx], keypoints2[sortedRowIndexes[0]]])

    if useRansac is False:
        return DisplayMatches(im1, im2, matched_pairs)

    # RANSAC: 10 independent trials, each seeded by one random match.
    consistent_sets = [[] for _ in range(10)]
    for i in range(10):
        randIndex = random.randrange(len(matched_pairs))
        for elem in matched_pairs:
            if isConsistent(matched_pairs[randIndex], elem, ransacThresOrient, ransacThresScale):
                consistent_sets[i].append(elem)
    # Keep the largest consistent subset (first one wins on ties, matching
    # the original strictly-greater selection).
    best_set = max(consistent_sets, key=len)
    return DisplayMatches(im1, im2, best_set)
def test():
    """Self-checks for isInOrientThreshold's wrap-around angle handling."""
    assert (isInOrientThreshold(-0.75 * math.pi, 0.25 * math.pi, math.pi))
    assert (not isInOrientThreshold(-1 * math.pi, 0.25 * math.pi, math.pi / 2))
    assert (isInOrientThreshold(-0.5 * math.pi, 0.25 * math.pi, math.pi))
    assert (isInOrientThreshold(-1 * math.pi, math.pi, math.pi / 8))  # equal
    assert (isInOrientThreshold(-1 / 6 * math.pi, 1 / 6 * math.pi, math.pi / 3))
    assert (not isInOrientThreshold(-1 / 6 * math.pi, 1 / 6 * math.pi, math.pi / 4))
    assert (isInOrientThreshold(11 / 6 * math.pi, -11 / 6 * math.pi, math.pi / 3))
    assert (isInOrientThreshold(-11 / 6 * math.pi, 1 / 6 * math.pi, math.pi / 3))
    assert (not isInOrientThreshold(11 / 6 * math.pi, -5 / 6 * math.pi, math.pi / 3))
    assert (not isInOrientThreshold(11 / 6 * math.pi, -5 / 3 * math.pi, math.pi / 3))
    assert (isInOrientThreshold(11 / 6 * math.pi, -5 / 3 * math.pi, math.pi))
# Test run...
test()
# Ratio-threshold sweep on the scene/book pair without RANSAC; one output
# image per threshold is written to results/.
siftThresholds = [0.40, 0.60, 0.70, 0.75, 0.78, 0.79, 0.80]
for siftThreshold in siftThresholds:
    match('scene', 'book', siftThreshold=siftThreshold, useRansac=False).save(
        'results/sb_' + ("%0.2f" % siftThreshold) + '_out.png')
# Grid search over ratio / orientation / scale thresholds with RANSAC on
# the library pair; file names encode the parameter combination.
siftThresholds = [0.78, 0.79, 0.8]
ransacOrientThresholds = [math.pi / 4, math.pi / 5, math.pi / 6, math.pi / 7, math.pi / 8]
ransacScaleThresholds = [0.4, 0.45, 0.5, 0.55, 0.6]
for siftThreshold in siftThresholds:
    for ransacOrientThreshold in ransacOrientThresholds:
        for ransacScaleThreshold in ransacScaleThresholds:
            match('library', 'library2', siftThreshold=siftThreshold, useRansac=True,
                  ransacThresOrient=ransacOrientThreshold,
                  ransacThresScale=ransacScaleThreshold).save(
                'results/ll_sift-' + ("%0.2f" % siftThreshold) +
                '_orient-' + ("%0.2f" % ransacOrientThreshold) + '_scale-' +
                ("%0.2f" % ransacScaleThreshold) + '_out.png')
# Best parameter combination found by the sweep above.
match('library', 'library2', siftThreshold=0.8, useRansac=True,
      ransacThresOrient=0.4,
      ransacThresScale=0.4).save('bestWithRansac.png')
| StarcoderdataPython |
78025 | # from zemailer.app.core import Settings, initialized_settings
# from zemailer.app.core.mixins.patterns import PatternsMixin
# from zemailer.app.core.sender import SendEmail, SendEmailWithAttachment
# from zemailer.app.core.servers import Gmail, Outlook
# from zemailer.app.patterns import schools
# # from zemailer.app.patterns.constructor import NameConstructor
# from zemailer.app.patterns.patterns import BasicNamePatterns, NamePatterns
# from zemailer.core import settings
# from zemailer.core.settings import configuration
| StarcoderdataPython |
1690506 | <gh_stars>1-10
from typing import List
from vardautomation import FileInfo
FILEINFO_ATTR: List[str] = [
'path',
'path_without_ext',
'work_filename',
'idx',
'preset',
'name',
'workdir',
'a_src',
'a_src_cut',
'a_enc_cut',
'chapter',
'clip',
'_trims_or_dfs',
'clip_cut',
'name_clip_output',
'name_file_final',
'name_clip_output_lossless',
'do_lossless',
'qpfile',
'do_qpfile'
]
def test_file_info_attr() -> None:
    """FileInfo must expose exactly the attributes listed in FILEINFO_ATTR."""
    info = FileInfo('tests/video_file.mkv')
    attributes = vars(info)
    assert len(attributes) == len(FILEINFO_ATTR)
    for name in attributes:
        assert name in FILEINFO_ATTR
# def test_file_info_trims() -> None:
# file = FileInfo('tests/video_file.mkv', trims_or_dfs=(24, -24))
| StarcoderdataPython |
3240839 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 26 12:23:55 2019
@author: jxf
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
#has incomplete data. 999 points are NaN
def read_file(fname):
    """Read a raw buoy data file into a DataFrame.

    Handles both one-line and two-line (names + units) header formats,
    builds a 'timestamp' column from the date component columns, and
    replaces the >100 "no data" sentinel values in ATMP/WTMP with NaN.
    """
    with open(fname, "r") as f:
        try:
            line1= f.readline().split()
        except Exception as e:
            raise Exception("problem reading first line of file %s"%fname) from e
        if line1[0] not in ("YY","#YY","YYYY"):
            raise Exception("bad header line 1 for file %s: '%s'"%(fname,line1))
        try:
            line2= f.readline().split()
        except Exception as e:
            raise Exception("problem reading second line of file %s"%fname) from e
        try:
            # If the second line starts with a number it is data, so there is
            # only one header row; otherwise expect a second header row.
            int(line2[0])
            has_second_header= False
            header= 0
        except ValueError:
            if line2[0]in ("#yy","#yr"):
                has_second_header=True
                header=[0,1]
            else:
                raise Exception("unexpected second header in file %s"%fname)
    #this gives it a second header
    df= pd.read_csv(fname, sep='\s+', header= header)#allows you to read the file
    if has_second_header:
        # Drop the second header level, keeping only the column names.
        df.columns = [h1 for (h1, h2) in df.columns]
    def mkyear(v):
        # Two-digit years are interpreted as 19xx.
        v = int(v)
        if v<100:
            return 1900 + v
        else:
            return v
    # Older files lack a minutes column ('mm'); default minutes to 0.
    if 'mm' in df.columns:
        df['timestamp']=df.apply(lambda s:datetime.datetime(mkyear(s[0]), int(s[1]), int(s[2]), int(s[3]), int(s[4])),
                            axis=1)
    else:
        df['timestamp']=df.apply(lambda s:datetime.datetime(mkyear(s[0]), int(s[1]), int(s[2]), int(s[3]), 0),
                            axis=1)
    df['ATMP'] = df['ATMP'].apply(lambda v:np.NaN if v>100 else v) # 999 is used to indicate no data available
    df['WTMP'] = df['WTMP'].apply(lambda v:np.NaN if v>100 else v) # 999 is used to indicate no data available
    print("%s has %d entries" % (fname, len(df)))
    return df
def build_median_df(df, base_col, year,
                    index=['01-Jan', '02-Feb', '03-Mar', '04-Apr', '05-May', '06-Jun',
                           '07-Jul', '08-Aug', '09-Sep', '10-Oct', '11-Nov', '12-Dec']):
    """Return the monthly medians of base_col for one year, indexed by the
    given month labels.

    The year column name varies by file format (YY / #YY / YYYY); the YY
    case also matches two-digit years (year - 1900).
    """
    if 'YY' in df.columns:
        df = df[(df['YY']==year) | (df['YY']==(year-1900))].copy()
    elif '#YY' in df.columns:
        df = df[df['#YY']==year].copy()
    elif 'YYYY' in df.columns:
        df = df[df['YYYY']==year].copy()
    else:
        assert 0, "Did not find a year column in %s for %s" % (df.columns, year)
    # One median per calendar month of the selected year.
    grouped=df.groupby(pd.Grouper(key = "timestamp", freq="M")).agg({base_col:['median']})
    grouped.columns=['%s %s median'%(year, base_col)]
    grouped['month'] = index
    grouped.set_index('month', drop=True, inplace=True)
    return grouped
def load_preprocessed_file(buoyno):
    """Load the preprocessed CSV for one buoy number as a DataFrame
    indexed by timestamp with ATMP and WTMP columns."""
    path = "../intermediate-data/processed_%s.csv.gz" % buoyno
    return pd.read_csv(path, parse_dates=["timestamp"], index_col="timestamp",
                       usecols=["ATMP", "WTMP", "timestamp"], header=0)
def get_monthly_averages(df):
    """Collapse time-indexed readings to one mean per calendar month,
    re-indexed by year-month Period."""
    averaged = df.resample("M").mean()
    averaged.index = [pd.Period(str(stamp)[0:7]) for stamp in averaged.index]
    return averaged
# Map month number (1-12) to its three-letter label.
MONTH_NAMES = {1:'Jan', 2:'Feb', 3:'Mar', 4:'Apr', 5:'May', 6:'Jun',
               7:'Jul', 8:'Aug', 9:'Sep', 10:'Oct', 11:'Nov', 12:'Dec'}


def monthly_computation(monthly, monthno, field):
    """Return the anomaly series of `field` for one calendar month: each
    value minus that month's long-term mean, indexed by year Period and
    named after the month."""
    in_month = monthly.index.map(lambda period: period.month) == monthno
    selected = monthly[in_month][field]
    anomaly = selected - selected.mean()
    anomaly.index = [pd.Period(year=int(str(ym)[0:4]), freq='Y')
                     for ym in anomaly.index]
    return anomaly.rename(MONTH_NAMES[monthno])


def compute_anomalies(monthly, field):
    """Stack the twelve per-month anomaly series into one DataFrame
    (rows = month names, columns = years)."""
    per_month = [monthly_computation(monthly, monthno, field)
                 for monthno in range(1, 13)]
    return pd.DataFrame(per_month)
def plot_anomaly_graph(buoyno, temptype, anomalies):
    """Plot yearly mean temperature anomalies with a fitted trend line,
    save the figure to ../results/ as PDF and PNG, and return the trend
    slope in degrees C per decade.
    """
    yearly_means = anomalies.mean()
    import scipy.stats
    # Regress the anomaly against year position (0, 1, 2, ...).
    slope, intercept, r_value, p_value, std_err = scipy.stats.linregress([i for (i, y) in enumerate(yearly_means.index)], yearly_means)
    fit_type = 'least squares fit'
    if np.isnan(slope):
        # If we cannot infer a straight line, just connect the endpoints
        print("Unable to fit a line")
        first_year = yearly_means.index[0]
        last_year = yearly_means.index[-1]
        print("Creating a line just using the endpoint years (%s, %s)" %
              (first_year, last_year))
        (slope, intercept) = np.polyfit([0, last_year-first_year], [yearly_means[0], yearly_means[-1]], 1)
        fit_type = 'endpoint fit'
    values = [i*slope+intercept for i in range(len(yearly_means.index))]
    linear_series = pd.Series(data=values, index=yearly_means.index, name='linear fit')
    pd.DataFrame({'yearly anomaly':yearly_means, fit_type:linear_series}).plot(figsize=(12,10));
    plt.scatter(yearly_means.index, yearly_means)
    plt.title('Yearly mean anomaly %s temperature for buoy %s (slope=%0.2f degrees/decade)' %
              (temptype, buoyno, slope*10));
    plt.ylabel('Degrees C');
    # Save both PDF and PNG versions of the figure.
    plt.savefig('../results/%s-%stemp-anomly.pdf' % (buoyno, temptype))
    plt.savefig('../results/%s-%stemp-anomly.png' % (buoyno, temptype))
    return slope*10 # the temp anomaly change per decade in degrees C
| StarcoderdataPython |
172929 | <filename>ABC/abc001-abc050/abc025/b.py
# -*- coding: utf-8 -*-
# AtCoder Beginner Contest
def main():
    """Read n moves from stdin, clamp each distance into [a, b], and
    report the final position as 'East <d>', 'West <d>' or 0."""
    n, a, b = map(int, input().split())
    position = 0
    for _ in range(n):
        direction, dist = input().split()
        # Clamp the step length into the inclusive range [a, b].
        step = int(dist)
        if step < a:
            step = a
        elif step > b:
            step = b
        # West moves are negative, East moves positive.
        if direction == 'West':
            step = -step
        position += step
    if position == 0:
        print(0)
    elif position > 0:
        print('East ' + str(position))
    else:
        print('West ' + str(abs(position)))
if __name__ == '__main__':
main()
| StarcoderdataPython |
107320 |
import heapq
import random
class myHeapContainer(list):
    """Binary heap (min-heap by default, max-heap when is_max is True)
    backed by a plain Python list.

    Fix: ``_heapify`` and ``heapsort`` used the Python 2-only ``xrange``
    (a NameError on Python 3); they now use ``range``, which the rest of
    the file already uses.
    """
    # Class-level defaults; real values are assigned in __init__/heapsort.
    heap = None
    is_max_heap = None
    def _is_in_order(self, parent, child):
        """Return True when the parent/child pair satisfies the heap property."""
        comp_val = parent > child
        if self.is_max_heap:
            return comp_val
        else:
            return (not comp_val)
    def __init__(self, arr=None, is_max=False):
        """Wrap arr (heapified in place) or start with an empty heap."""
        self.is_max_heap = is_max
        self.heap = arr
        if arr:
            self._heapify()
        else:
            self.heap = []
    def __len__(self):
        return len(self.heap)
    def _siftdown(self, root, child):
        """Bubble heap[child] up toward root until the heap property holds."""
        parent = (child - 1) >> 1
        while parent >= root:
            if not self._is_in_order(self.heap[parent], self.heap[child]):
                self.heap[child], self.heap[parent] = self.heap[parent], self.heap[child]
                child = parent
                parent = (child - 1) >> 1
            else:
                break
    def _siftup(self, root, length=None):
        """Push heap[root] down within the first `length` items (defaults
        to the whole heap) until the heap property holds."""
        c_pos = root * 2 + 1
        if length is None:
            tail = len(self)
        else:
            tail = length
        while c_pos < tail:
            r_pos = c_pos + 1
            # Prefer the child that should sit above the other.
            if r_pos < tail and self._is_in_order(self.heap[r_pos], self.heap[c_pos]):
                c_pos = r_pos
            if not self._is_in_order(self.heap[root], self.heap[c_pos]):
                self.heap[c_pos], self.heap[root] = self.heap[root], self.heap[c_pos]
                root = c_pos
                c_pos = root * 2 + 1
            else:
                break
    def _heapify(self):
        """Restore the heap property over the whole of self.heap."""
        for parent in range(len(self) // 2, -1, -1):
            self._siftup(parent)
    def heappush(self, item):
        """Add item to the heap."""
        self.heap.append(item)
        self._siftdown(0, len(self) - 1)
    def heappop(self):
        """Remove and return the top item (min or max depending on mode)."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        retval = self.heap.pop()
        self._siftup(0)
        return retval
    def heapsort(self, arr, is_max=False):
        """Sort arr in place by repeated extraction to the tail.

        Note: with the default min-heap the smallest element lands at the
        end on each pass, so the result is DESCENDING; is_max=True yields
        ascending order.
        """
        self.is_max_heap = is_max
        self.heap = arr
        self._heapify()
        for i in range(len(arr), 0, -1):
            self.heap[0], self.heap[i - 1] = self.heap[i - 1], self.heap[0]
            self._siftup(0, i - 1)
def main():
    """Demonstrate myHeapContainer: pop order on a shuffled range, then
    in-place heapsort in both min-heap and max-heap modes."""
    arr = []
    a = range(10,0,-1)
    arr[:] = a
    random.shuffle(arr)
    print(arr)
    h = myHeapContainer(arr, False)
    print(h.heap)
    # Repeated heappop on a min-heap yields ascending order.
    sarr = []
    while len(h):
        sarr.append(h.heappop())
    print(sarr)
    arr[:] = a
    h.heapsort(arr)
    print(arr)
    arr[:] = a
    h.heapsort(arr, True)
    print(arr)
    '''
    print("_______")
    arr = range(20,0,-1)
    h = myHeapContainer(None, True)
    for x in arr:
        h.heappush(x)
        heapq.heappush(hq, x)
    print(h.heap)
    print(hq)
    while(len(h)):
        h.heappop()
        heapq.heappop(hq)
        print(h.heap)
        print(hq)
    '''
if __name__ == '__main__':
main()
| StarcoderdataPython |
3341451 | <reponame>tansyab1/PhD-project
import scipy
import os
import numpy as np
def loadimage(filename):
    """Load an image file as a float numpy array.

    NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2 and
    np.float was removed in NumPy 1.24 — this requires old versions;
    consider imageio.imread and plain float. Confirm pinned versions.
    """
    img = scipy.misc.imread(filename).astype(np.float)
    return img
def loadimage_gray(filename):
    """Load an image as grayscale (mode 'L') float array with a trailing
    single-channel axis (H x W x 1).

    NOTE(review): relies on the removed scipy.misc.imread / np.float —
    see loadimage.
    """
    img = scipy.misc.imread(filename, mode='L').astype(np.float)[:,:,np.newaxis]
    return img
def saveimages(outimages, prefix='samples', filenames=None, outdir='out'):
    """Write every image in outimages (N x H x W x C array) into outdir.

    Files are named "<prefix>_<i>.png", or "<prefix>_<basename>" when
    filenames are supplied. The output directory is created if missing.
    """
    print("Array shape {}".format(outimages.shape))
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    for index in range(len(outimages)):
        if filenames is None:
            name = '{}_{}.png'.format(prefix, index)
        else:
            name = '{}_{}'.format(prefix, os.path.basename(filenames[index]))
        target = os.path.join(outdir, name)
        scipy.misc.imsave(target, np.squeeze(outimages[index, :, :, :]))
| StarcoderdataPython |
3306810 | <reponame>thisdwhitley/ansible-role-add-repo
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_chrome_installed(host):
    """The google-chrome-stable package must be installed on every host."""
    package = host.package('google-chrome-stable')
    assert package.is_installed
| StarcoderdataPython |
16332 | <gh_stars>0
#!/usr/bin/python
from ansible.module_utils.basic import *
from jdll import API
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: book
author: "<NAME> (@Spredzy)"
version_added: "2.3"
short_description: Gerer des resources books de notre API de test.
description:
- Ce module interagit avec le endpoint /books de notre API de test.
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Si la resource book doit etre presente ou absente.
id:
required: false
description:
- L'identifieur de la resource book.
author:
required: false
description:
- Le nom de l'auteur de book.
title:
required: false
description:
- Titre du book.
summary:
required: true
description:
- Resume du book.
'''
EXAMPLES = '''
# Create a new book
- book:
title: A title
author: An author
summary: A summary
# Update a specific book
- book:
id: XXXX
title: Un titre alternatif
# Delete a book
- book:
id: XXX
state: absent
'''
RETURN = '''
title:
description: The title of the book
returned:
- changed
- success
type: string
sample: A title
summary:
description: The summary of the book
returned:
- changed
- success
type: string
sample: A summary
id:
description: ID of the book
returned:
- changed
- success
type: string
sample: XXXXX
'''
def main():
    """Entry point of the 'book' Ansible module.

    state=absent deletes the book identified by 'id'. state=present
    updates the book when 'id' is given, creates one when author/title/
    summary are all given, and otherwise lists all books.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            id=dict(type='str'),
            author=dict(type='str'),
            summary=dict(type='str'),
            title=dict(type='str'),
        ),
    )

    # TODO: List of improvements that could be done with
    # this module as a starting point.
    #
    # * Implement noop mode with --check
    # * Set the 'changed' status according to the actual action taken
    # * Check return number and return message accordingly
    #
    myapi = API()

    result = {
        'changed': True
    }

    if module.params['state'] == 'absent':
        # Bug fix: every key declared in argument_spec is always present in
        # module.params (value None when unset), so the original membership
        # test ("'id' not in module.params") never failed and delete_book
        # could be called with None. Test the value instead.
        if module.params['id'] is None:
            module.fail_json(msg='id parameter is mandatory')
        # Call to the binding: DELETE
        myapi.delete_book(module.params['id'])
    else:
        if module.params['id'] is not None:
            update = {}
            for key in ['author', 'title', 'summary']:
                # Same fix as above: only copy parameters that were actually
                # provided, instead of every declared key.
                if module.params[key] is not None:
                    update[key] = module.params[key]
            # Call to the binding: PUT
            myapi.update_book(module.params['id'], **update)
            result.update(update)
        elif module.params['author'] is not None or module.params['title'] is not None or module.params['summary'] is not None:
            if module.params['author'] is None or module.params['title'] is None or module.params['summary'] is None:
                module.fail_json(msg='author, title and summary are mandatory parameters')
            book = {
                'author': module.params['author'],
                'summary': module.params['summary'],
                'title': module.params['title']
            }
            # Call to the binding: POST
            myapi.create_book(**book)
            result.update(book)
        else:
            # Call to the binding: GET
            books = {'books': myapi.list_books()}
            result.update(books)

    module.exit_json(**result)
main()
| StarcoderdataPython |
166074 | import pandas as pd
from modules import tqdm
import argparse
import codecs
import os
def conll2003_preprocess(
        data_dir, train_name="eng.train", dev_name="eng.testa", test_name="eng.testb"):
    """Convert the three CoNLL-2003 BIO splits into tab-separated CSVs.

    Each split is written next to its source in ``data_dir`` with columns:
    labels (space-joined tags with '-' replaced by '_'), text, and cls
    (True when the sentence contains only "O" tags).

    Bug fix: the test split was previously written to "<name>.dev.csv"
    (copy-paste from the dev branch); it now goes to "<name>.test.csv".
    """
    _write_split(os.path.join(data_dir, train_name),
                 os.path.join(data_dir, "{}.train.csv".format(train_name)))
    _write_split(os.path.join(data_dir, dev_name),
                 os.path.join(data_dir, "{}.dev.csv".format(dev_name)))
    _write_split(os.path.join(data_dir, test_name),
                 os.path.join(data_dir, "{}.test.csv".format(test_name)))


def _write_split(src_path, dst_path):
    """Read one BIO file via read_data and write it as a labels/text/cls CSV."""
    rows = read_data(src_path)
    frame = pd.DataFrame({"labels": [x[0] for x in rows], "text": [x[1] for x in rows]})
    # cls is True when every token label is "O" (no entities in the sentence).
    frame["cls"] = frame["labels"].apply(lambda x: all(y.split("_")[0] == "O" for y in x.split()))
    frame.to_csv(dst_path, index=False, sep="\t")
def read_data(input_file):
    """Read a CoNLL-2003 BIO file into sentence pairs.

    Returns a list of two-element lists: the space-joined labels (with
    '-' replaced by '_') and the space-joined tokens of each sentence.
    """
    with open(input_file, "r", encoding="utf-8") as f:
        lines = []
        words = []
        labels = []
        f_lines = f.readlines()
        for line in tqdm(f_lines, total=len(f_lines), desc="Process {}".format(input_file)):
            contends = line.strip()
            # Token is the first whitespace field, label the last.
            word = line.strip().split(' ')[0]
            label = line.strip().split(' ')[-1]
            # Document separators carry no annotation; keep an empty token.
            if contends.startswith("-DOCSTART-"):
                words.append('')
                continue
            if len(contends) == 0 and not len(words):
                words.append("")
            # A blank line following a '.' token closes the sentence: emit
            # [joined labels, joined words] and start collecting a new one.
            if len(contends) == 0 and words[-1] == '.':
                lbl = ' '.join([label for label in labels if len(label) > 0])
                w = ' '.join([word for word in words if len(word) > 0])
                lines.append([lbl, w])
                words = []
                labels = []
                continue
            words.append(word)
            labels.append(label.replace("-", "_"))
    return lines
def parse_args():
    """Parse CLI arguments into a dict: data_dir plus the three standard
    CoNLL-2003 split file names (with their usual defaults)."""
    cli = argparse.ArgumentParser()
    cli.add_argument('--data_dir', type=str)
    cli.add_argument('--train_name', type=str, default="eng.train")
    cli.add_argument('--dev_name', type=str, default="eng.testa")
    cli.add_argument('--test_name', type=str, default="eng.testb")
    return vars(cli.parse_args())
if __name__ == "__main__":
conll2003_preprocess(**parse_args())
| StarcoderdataPython |
15754 | <filename>pytint/machine_io.py
from pytint.interpreters import FiniteAutomaton
from typing import List, Union, Dict, Iterable
import collections
import yaml
class IncompleteMachine(Exception):
    """Raised when a machine description is missing a required field."""

    def __init__(self, missing: str, machine_type: str):
        # The absent field name and the machine type that requires it.
        self.missing = missing
        self.machine_type = machine_type

    def __str__(self):
        return f'"{self.missing}" is required for {self.machine_type} but not provided'
class UnsupportedMachine(Exception):
    """Raised when a machine type other than dfa/nfa is requested."""
    pass
def load_machine(yaml_input: str, machine_type: str = "", name: str = ""):
    """Build a machine from a YAML description string.

    machine_type and name, when non-empty, override the document's
    "type" and "name" fields. Only "dfa" and "nfa" are supported (both
    build a FiniteAutomaton). Raises IncompleteMachine for missing
    required fields and UnsupportedMachine for any other type.

    Fix: ``collections.Iterable`` was removed in Python 3.10; the checks
    now use ``collections.abc.Iterable``.
    """
    # loads yaml from input
    data = yaml.safe_load(yaml_input)
    # if no type override, attempt to load type from data
    if not machine_type:
        if "type" in data:
            machine_type = str(data["type"]).lower()
        else:
            # can't find machine type
            raise IncompleteMachine("type", "machine")
    if not name and "name" in data:
        name = data["name"]
    if "start" in data:
        start = str(data["start"])
    else:
        raise IncompleteMachine("start", machine_type)
    if machine_type == "dfa" or machine_type == "nfa":
        machine = FiniteAutomaton(name)
        machine.set_start_state(start)
        if "accept-states" in data:
            # Accept either a single state or any iterable of states.
            raw_accepted: Union[any, Iterable[any]] = data["accept-states"]
            if isinstance(raw_accepted, str) or not isinstance(raw_accepted, collections.abc.Iterable):
                raw_accepted = [raw_accepted]
            accepted: List[str] = list(map(lambda x: str(x), raw_accepted))
            for accept_state in accepted:
                machine.add_accepting_state(accept_state)
        else:
            raise IncompleteMachine("accept-states", machine_type)
        if "transitions" in data:
            for transition in data["transitions"]:
                if len(transition) < 3:
                    raise Exception("Transitions are 3-tuples!")
                state: str = str(transition[0])
                # Both the symbol and the next state may be a scalar or an iterable.
                raw_symbols: Union[any, Iterable[any]] = str(transition[1])
                if isinstance(raw_symbols, str) or not isinstance(raw_symbols, collections.abc.Iterable):
                    raw_symbols = [raw_symbols]
                symbols: List[str] = list(map(lambda x: str(x), raw_symbols))
                raw_next_states: Union[any, Iterable[any]] = transition[2]
                if isinstance(raw_next_states, str) or not isinstance(raw_next_states, collections.abc.Iterable):
                    raw_next_states = [raw_next_states]
                next_states: List[str] = list(map(lambda x: str(x), raw_next_states))
                for symbol in symbols:
                    if symbol.lower() == "epsilon" or symbol.lower() == "ε":  # process epsilon
                        symbol = "ε"
                    for next_state in next_states:
                        machine.add_transition(state, symbol, next_state)
        else:
            raise IncompleteMachine("transitions", machine_type)
        return machine
    else:
        raise UnsupportedMachine("{} is not a supported machine type!".format(machine_type))
def load_machine_from_file(path: str, machine_type: str = "", name: str = ""):
    """Read the YAML file at path and delegate to load_machine."""
    with open(path, "r") as handle:
        contents = handle.read()
    return load_machine(contents, machine_type, name)
| StarcoderdataPython |
4807749 | # -*- coding: utf-8 -*-
from ws2812 import WS2812
ring = WS2812(spi_bus=1, led_count=16)
data = [
(24, 0, 0),
(0, 24, 0),
(0, 0, 24),
(12, 12, 0),
(0, 12, 12),
(12, 0, 12),
(24, 0, 0),
(21, 3, 0),
(18, 6, 0),
(15, 9, 0),
(12, 12, 0),
(9, 15, 0),
(6, 18, 0),
(3, 21, 0),
(0, 24, 0),
(8, 8, 8),
]
ring.show(data)
| StarcoderdataPython |
3210304 | from __future__ import absolute_import
try:
from ._version \
import \
__version__
except ImportError as e:
__version__ = "no-built"
from .package \
import\
PackageInfo
from .pool \
import\
Pool
from .repository \
import\
Repository
from .request \
import\
Request
from .requirement \
import\
Requirement
from .solver.core \
import\
Solver
| StarcoderdataPython |
110094 | """Alarmageddon main module"""
__version__ = "1.1.2"
from alarmageddon.run import run_tests, construct_publishers, load_config
| StarcoderdataPython |
1705541 | # -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Date: 2021-08-18
# @Filename: plot_skymakercam.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
# from lvmtan run:
# poetry run container_start --name lvm.all
# from lvmpwi run:
# poetry run container_start --name=lvm.sci.pwi --simulator
# from skymakercam run:
# poetry run python utils/plot_skymakercam.py -v -c python/skymakercam/etc/cameras.yaml lvm.sci.agw.cam
import argparse
import os
import sys
import logging
import time
import uuid
from plotit import PlotIt
from keyreader import KeyReader
from clu import AMQPClient, CommandStatus
from cluplus.proxy import Proxy, invoke, unpack
from skymakercam.camera import SkymakerCameraSystem, SkymakerCamera, asyncio, rebin
async def plot_skymakercam(exptime, binning, guiderect, camname, verb=False, config="../etc/cameras.yaml"):
    """Expose the named skymaker camera in a loop and display the images.

    Connects AMQP proxies for the TCS, focus stage and k-mirror, takes an
    initial exposure, then keeps exposing and updating the plot until the
    user presses 'q'.  Pressing 'o' enables object finding for the next
    update.

    :param exptime: exposure time in seconds per frame
    :param binning: rebin factor applied to the image before plotting
    :param guiderect: size of the guide rectangle passed to PlotIt
    :param camname: camera key in the YAML config (e.g. "lvm.sci.agw.cam")
    :param verb: enable debug logging
    :param config: path to the cameras YAML configuration file
    """
    cs = SkymakerCameraSystem(SkymakerCamera, camera_config=config, verbose=verb)
    cam = await cs.add_camera(name=camname, uid=cs._config[camname]["uid"])
    if verb:
        cs.logger.log(logging.DEBUG, f"cameras {cs.list_available_cameras()}")
    # cs.logger.log(logging.DEBUG, f"config {cs._config[camname]['tcs']}")
    ## client interfaces to TCS, focus stage and kmirror are optional and not needed for skymakercam - it connects internally to them.
    # Unique client name avoids clashes when several instances run at once.
    amqpc = AMQPClient(name=f"{sys.argv[0]}.proxy-{uuid.uuid4().hex[:8]}")
    await amqpc.start()
    pwi_tcs = Proxy(amqpc, cs._config[camname]['tcs'])
    await pwi_tcs.start()
    await pwi_tcs.setConnected(True)
    await pwi_tcs.setTracking(True)
    focus_stage = Proxy(amqpc, cs._config[camname]['focus_stage'])
    await focus_stage.start()
    await focus_stage.moveToHome()
    kmirror = Proxy(amqpc, cs._config[camname]['kmirror'])
    await kmirror.start()
    # First exposure seeds the plot window.
    exp = await cam.expose(exptime, camname)
    p = PlotIt(rebin(exp.data, binning), guiderect, logger=cs.logger.log)
    # Non-blocking key reader so the exposure loop keeps running.
    keyreader = KeyReader(echo=False, block=False)
    while(True):
        find_objects = False
        key = keyreader.getch()
        if key == 'q':
            cs.logger.log(logging.DEBUG, f"Goodbye and thanks for all the fish.")
            break
        elif key == 'o':
            cs.logger.log(logging.DEBUG, f"Find objects.")
            find_objects = True
        elif key:
            cs.logger.log(logging.DEBUG, f"-{key}-")
        exp = await cam.expose(exptime, "LAB TEST")
        p.update(rebin(exp.data, binning), find_objects)
def main():
    """Parse command-line options and run the skymakercam plotting loop."""
    # argparse is already imported at module level; the previous local
    # `import argparse` here was redundant and has been removed.
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", '--exptime', type=float, default=5.0,
                        help="Expose for for exptime seconds")
    parser.add_argument("-b", '--binning', type=int, default=1,
                        help="Image Binning")
    parser.add_argument("-g", '--guiderect', type=int, default=60,
                        help="Size of guide rectangle")
    parser.add_argument("-v", '--verbose', action='store_true',
                        help="print some notes to stdout")
    # Name of an optional YAML file
    parser.add_argument("-c", '--cfg', default="python/skymakercam/etc/cameras.yaml",
                        help="YAML file of lvmt cameras")
    # the last argument is mandatory: must be the name of exactly one camera
    # as used in the configuration file
    parser.add_argument('camname', default="sci.agw")
    args = parser.parse_args()
    asyncio.run(plot_skymakercam(args.exptime, args.binning, args.guiderect, args.camname, verb=args.verbose, config=args.cfg))
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4839575 | from datetime import datetime, timedelta, timezone
from typing import Any
import pytest
from queuery_client.cast import _cast_type, cast_row
@pytest.mark.parametrize(
    "value, typename, desired",
    [
        ("2", "smallint", 2),
        ("123", "integer", 123),
        ("12345", "bigint", 12345),
        ("3.14", "numeric", 3.14),
        ("2.71828", "double precision", 2.71828),
        ("abc", "character", "abc"),
        ("abcde", "character varying", "abcde"),
        (
            "2021-01-01 12:34:56",
            "timestamp without time zone",
            datetime(2021, 1, 1, 12, 34, 56),
        ),
        (
            "2021-01-01 12:34:56.123456+09",
            "timestamp with time zone",
            datetime(
                2021,
                1,
                1,
                12,
                34,
                56,
                123456,
                tzinfo=timezone(timedelta(hours=9)),
            ),
        ),
        ("true", "boolean", True),
        ("false", "boolean", False),
        ("0", "boolean", False),
        ("1", "boolean", True),
    ],
)
def test_cast_type(value: str, typename: str, desired: Any) -> None:
    """_cast_type converts a string value to the Python type matching the
    given SQL type name (ints, floats, strings, timestamps, booleans)."""
    assert _cast_type(value, typename) == desired
def test_cast_row() -> None:
    """A row is cast element-wise according to the manifest schema."""
    schema_bases = ["integer", "character", "boolean"]
    manifest = {
        "schema": {
            "elements": [{"type": {"base": base}} for base in schema_bases]
        }
    }
    row = ["1", "1", "1"]
    assert cast_row(row, manifest) == [1, "1", True]
def test_cast_row_with_mismatched_manifest() -> None:
    """cast_row raises ValueError when the row is longer than the schema."""
    elements = [
        {"type": {"base": base}}
        for base in ("integer", "character", "boolean")
    ]
    manifest = {"schema": {"elements": elements}}
    # Four columns against a three-element schema.
    row = ["1", "abc", "0", "2021-01-01 01:23:45"]
    with pytest.raises(ValueError) as e:
        _ = cast_row(row, manifest)
    assert str(e.value).startswith("len(row) is not equal to the size of schema.elements in manifest:")
| StarcoderdataPython |
86293 | from django.urls import path, include
from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
# URL routes for the registration view.
urlpatterns=[
    path('register/', views.register, name='my_instagram-register'),
]
# Serve user-uploaded media through Django only during development;
# in production a proper web server should handle MEDIA_URL.
if settings.DEBUG:
    urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
49155 | from price_picker.common.database import CRUDMixin
from price_picker import db
class Shop(CRUDMixin, db.Model):
    """SQLAlchemy model for a shop; the shop name itself is the primary key."""
    __tablename__ = 'shops'
    # The name doubles as the primary key; "Zentrale" is the default shop name.
    name = db.Column(db.String(128), primary_key=True, unique=True, default="Zentrale")
    @classmethod
    def query_factory_all(cls):
        """Return all shops ordered by name, creating the default shop first
        when the table is empty (presumably used as a form query_factory —
        confirm with callers)."""
        # insert default if no shop exists
        if cls.query.first() is None:
            cls.create()
        return cls.query.order_by(cls.name)
    def __str__(self):
        return self.name
    __repr__ = __str__
| StarcoderdataPython |
1761444 | <filename>aiommy/permissions/base.py
from aiohttp import web
class BasePermission(object):
    """Base permission class: permits every request and answers denials
    with HTTP 403."""

    async def check_permission(self, request):
        """Hook for subclasses; the base implementation permits everything."""
        return None

    async def get_response(self):
        """Response used when permission is denied."""
        return web.HTTPForbidden()
| StarcoderdataPython |
3239942 | from autotext import Autotext
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.feature_extraction.text import TfidfVectorizer
import csv
# Benchmark Autotext classification on the 20 newsgroups corpus and write
# the scores to a CSV file.
root = '../datasets/'
train_sets = ['20_newsgroups/']
test_sets = ['20_newsgroups/']
skip_sets = ['20news-18828']
autotext = Autotext( strategy = 'classif', limit_memory = True)
# NOTE(review): the try/except around train/predict was deliberately
# disabled (commented out below via the triple-quoted string) so failures
# surface instead of yielding empty predictions.
#try:
autotext.train(root+train_sets[0], skip = skip_sets[0])
predictions = autotext.predict(root+test_sets[0])
'''
except Exception as e:
    predictions = []
    print('ERROR: ')
    print(e)
'''
# Macro-averaged scores against the held-out labels kept by Autotext.
a = accuracy_score(predictions,autotext.y_test)
f = f1_score(predictions,autotext.y_test,average='macro')
p = precision_score(predictions,autotext.y_test,average='macro')
r = recall_score(predictions,autotext.y_test,average='macro')
with open('../results/performance.csv', 'w+') as f_results:
    rwriter = csv.writer(f_results, delimiter=',')
    # NOTE(review): 'Precission' is a typo in the CSV header; left unchanged
    # here because downstream consumers may depend on the exact column name.
    rwriter.writerow(['Dataset', 'Accuracy', 'F1', 'Precission', 'Recall'])
    rwriter.writerow([test_sets[0], a, f, p, r])
print(train_sets[0]+ ': '+str(a))
| StarcoderdataPython |
17400 | import numpy as np
import pandas as pd
import os.path as path
import abydos.distance as abd
import abydos.phonetic as abp
import pytest
from scipy.sparse import csc_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
import name_matching.name_matcher as nm
@pytest.fixture
def name_match():
    """NameMatcher preloaded with the master names from test_names.csv
    (processing and n-gram transform deferred)."""
    package_dir = path.dirname(path.dirname(path.dirname(path.abspath(__file__))))
    data = pd.read_csv(path.join(package_dir, 'test','test_names.csv'))
    name_matcher = nm.NameMatcher()
    name_matcher.load_and_process_master_data(
        'company_name', data, start_processing=False, transform=False)
    return name_matcher
@pytest.fixture
def adjusted_name():
    """DataFrame of perturbed company names to match against the master data."""
    package_dir = path.dirname(path.dirname(path.dirname(path.abspath(__file__))))
    return pd.read_csv(path.join(package_dir, 'test','adjusted_test_names.csv'))
@pytest.fixture
def words():
    """Word list with known frequencies for the top-word selection tests."""
    return ['fun', 'small', 'pool', 'fun', 'small', 'pool', 'sign',
            'small', 'pool', 'sign', 'sign', 'small', 'pool', 'sign', 'paper',
            'oppose', 'paper', 'oppose', 'brown', 'pig', 'fat', 'oppose', 'paper',
            'oppose', 'brown', 'pig', 'fat', 'snail']
@pytest.mark.parametrize("method", ["", None, "no_method"])
def test_make_distance_metrics_error(name_match, method):
    """Unknown or empty metric names must raise TypeError."""
    with pytest.raises(TypeError):
        name_match.set_distance_metrics([method])
@pytest.mark.parametrize("method, result",
                         [['indel', abd.Indel()],
                          ['discounted_levenshtein', abd.DiscountedLevenshtein()],
                          ['tichy', abd.Tichy()],
                          ['cormodeL_z', abd.CormodeLZ()],
                          ['iterative_sub_string', abd.IterativeSubString()],
                          ['baulieu_xiii', abd.BaulieuXIII()],
                          ['clement', abd.Clement()],
                          ['dice_asymmetricI', abd.DiceAsymmetricI()],
                          ['kuhns_iii', abd.KuhnsIII()],
                          ['overlap', abd.Overlap()],
                          ['pearson_ii', abd.PearsonII()],
                          ['weighted_jaccard', abd.WeightedJaccard()],
                          ['warrens_iv', abd.WarrensIV()],
                          ['bag', abd.Bag()],
                          ['rouge_l', abd.RougeL()],
                          ['ratcliff_obershelp', abd.RatcliffObershelp()],
                          ['ncd_bz2', abd.NCDbz2()],
                          ['fuzzy_wuzzy_partial_string',
                           abd.FuzzyWuzzyPartialString()],
                          ['fuzzy_wuzzy_token_sort', abd.FuzzyWuzzyTokenSort()],
                          ['fuzzy_wuzzy_token_set', abd.FuzzyWuzzyTokenSet()],
                          ['editex', abd.Editex()],
                          ['typo', abd.Typo()],
                          ['lig_3', abd.LIG3()],
                          ['ssk', abd.SSK()],
                          ['refined_soundex', abd.PhoneticDistance(transforms=abp.RefinedSoundex(
                              max_length=30), metric=abd.Levenshtein(), encode_alpha=True)],
                          ['double_metaphone', abd.PhoneticDistance(transforms=abp.DoubleMetaphone(max_length=30), metric=abd.Levenshtein(), encode_alpha=True)]]
                         )
def test_make_distance_metrics(name_match, method, result):
    """Each supported metric name maps to the matching abydos distance class."""
    name_match.set_distance_metrics([method])
    assert type(name_match._distance_metrics.popitem()[1][0]) == type(result)
@pytest.mark.parametrize("kwargs_str, result_1, result_2, result_3, result_4",
                         [[{"ngrams": (4, 5)}, 0, False, (4, 5), 5000],
                          [{"low_memory": True}, 0, True, (2, 3), 5000],
                          [{"legal_suffixes": True}, 244, False, (2, 3), 5000],
                          [{"legal_suffixes": True, "number_of_rows": 8,
                              "ngrams": (1, 2, 3)}, 244, False, (1, 2, 3), 8],
                          ])
def test_initialisation(kwargs_str, result_1, result_2, result_3, result_4):
    """Constructor kwargs end up in the expected private attributes."""
    name_match = nm.NameMatcher(**kwargs_str)
    assert len(name_match._word_set) == result_1
    assert name_match._low_memory == result_2
    assert name_match._vec.ngram_range == result_3
    assert name_match._number_of_rows == result_4
@pytest.mark.parametrize("occ, result_1, result_2, result_3, result_4, result_5",
                         [[1, '', '', '', '', ''],
                          [2, 'a-nd', 'Hndkiewicz,2Nicolas',
                              'Tashirian', '<NAME>', 'Marquardt,'],
                          [3, '<NAME>-nd', 'Hndkiewicz,2Nicolas',
                              'Runolfsson, <NAME>', '<NAME>', '<NAME>,'],
                          ])
def test_preprocess_reduce(name_match, adjusted_name, occ, result_1, result_2, result_3, result_4, result_5):
    """Spot-check _preprocess_reduce output for several occurrence counts."""
    name_match._column_matching = 'company_name'
    new_names = name_match._preprocess_reduce(
        adjusted_name, occurence_count=occ)
    assert new_names.loc[1866, 'company_name'] == result_1
    assert new_names.loc[1423, 'company_name'] == result_2
    assert new_names.loc[268, 'company_name'] == result_3
    assert new_names.loc[859, 'company_name'] == result_4
    assert new_names.loc[1918, 'company_name'] == result_5
@pytest.mark.parametrize("col, start_pro, transform",
                         [['company_name', False, False],
                          ['no_name', False, False],
                          ['company_name', True, False],
                          ['company_name', True, True],
                          ['company_name', True, True],
                          ])
def test_load_and_process_master_data(adjusted_name, col, start_pro, transform):
    """Loading master data stores the column/data and, when requested,
    performs preprocessing and the sparse n-gram transform."""
    name_matcher = nm.NameMatcher()
    name_matcher.load_and_process_master_data(
        column=col,
        df_matching_data=adjusted_name,
        start_processing=start_pro,
        transform=transform)
    assert name_matcher._column == col
    pd.testing.assert_frame_equal(
        name_matcher._df_matching_data, adjusted_name)
    assert name_matcher._preprocessed == start_pro
    if transform & start_pro:
        assert type(name_matcher._n_grams_matching) == csc_matrix
@pytest.mark.parametrize("trans, common",
                         [[False, False],
                          [True, False],
                          [False, True],
                          [True, True],
                          ])
def test_process_matching_data(name_match, trans, common):
    """_process_matching_data marks data preprocessed and optionally builds
    the n-gram matrix and the common-word set."""
    name_match._postprocess_common_words = common
    name_match._process_matching_data(transform=trans)
    assert name_match._preprocessed
    if trans:
        assert type(name_match._n_grams_matching) == csc_matrix
    else:
        assert name_match._n_grams_matching is None
    if common:
        assert len(name_match._word_set) > 0
    else:
        assert len(name_match._word_set) == 0
@pytest.mark.parametrize("lower_case, punctuations, ascii, result_1, result_2, result_3",
                         [[False, False, False, 'Schumm PLC', 'Towne, Johnston and Murray', 'Ösinski-Schinner'],
                          [True, False, False, 'schumm plc',
                              'towne, johnston and murray', 'ösinski-schinner'],
                          [False, True, False, 'Schumm PLC',
                              'Towne Johnston and Murray', 'ÖsinskiSchinner'],
                          [False, False, True, 'Schumm PLC',
                              'Towne, Johnston and Murray', 'Osinski-Schinner'],
                          [False, True, True, 'Schumm PLC',
                              'Towne Johnston and Murray', 'OsinskiSchinner'],
                          [True, False, True, 'schumm plc',
                              'towne, johnston and murray', 'osinski-schinner'],
                          [True, True, False, 'schumm plc',
                              'towne johnston and murray', 'ösinskischinner'],
                          [True, True, True, 'schumm plc',
                              'towne johnston and murray', 'osinskischinner'],
                          ])
def test_preprocess(name_match, lower_case, punctuations, ascii, result_1, result_2, result_3):
    """All combinations of the lowercase/punctuation/ascii preprocessing
    flags produce the expected strings."""
    name_match._preprocess_lowercase = lower_case
    name_match._preprocess_punctuations = punctuations
    name_match._preprocess_ascii = ascii
    new_df = name_match.preprocess(
        name_match._df_matching_data, 'company_name')
    assert new_df.loc[0, 'company_name'] == result_1
    assert new_df.loc[2, 'company_name'] == result_2
    assert new_df.loc[784, 'company_name'] == result_3
@pytest.mark.parametrize("low_memory, ngrams, result_1, result_2, result_3",
                         [[1, (5, 6), 0.02579, 0.00781, 0.01738],
                          [6, (2, 3), 0.009695, 0.01022, 0.01120],
                          [8, (1, 2), 0.027087, 0.02765, 0.02910],
                          [0, (5, 6), 0.02579, 0.00781, 0.01738],
                          [0, (2, 3), 0.009695, 0.01022, 0.01120],
                          [0, (1, 2), 0.027087, 0.02765, 0.02910],
                          ])
def test_transform_data(name_match, low_memory, ngrams, result_1, result_2, result_3):
    """TF-IDF n-gram values match snapshots regardless of low-memory mode."""
    name_match._low_memory = low_memory
    name_match._vec = TfidfVectorizer(
        lowercase=False, analyzer="char", ngram_range=ngrams)
    name_match._process_matching_data(transform=False)
    name_match.transform_data()
    assert name_match._n_grams_matching.data[10] == pytest.approx(
        result_1, 0.001)
    assert name_match._n_grams_matching.data[181] == pytest.approx(
        result_2, 0.001)
    assert name_match._n_grams_matching.data[1000] == pytest.approx(
        result_3, 0.001)
@pytest.mark.parametrize("to_be_matched, possible_matches, metrics, result",
                         [('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'discounted_levenshtein'], 5),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 7),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'overlap', 'iterative_sub_string'], 6),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandse Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'overlap', 'bag'], 11),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank',
                                                     'De Nederlandsche Bank', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'discounted_levenshtein'], 4),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 6),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'overlap', 'iterative_sub_string'], 6),
                          ('De Nederlandsche Bank', ['Nederlandsche Bank', 'De Nederlancsh Bank', 'De Nederlandsche Bank', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'overlap', 'bag'], 6),
                          ('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman',
                                          'Gerlach and Sons', 'Bank de Nederlandsche'], ['weighted_jaccard'], 2),
                          ('Schumm PLC', ['Torphy-Corkery', 'Hansen, Hoppe and Tillman', 'Gerlach and Sons',
                                          'Bank de Nederlandsche'], ['weighted_jaccard', 'discounted_levenshtein'], 4),
                          ('Schumm PLC', ['Torphy-Corkery', '<NAME>', 'Gerlach and Sons', 'Bank de Nederlandsche'], [
                              'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], 6),
                          ('Schumm PLC', ['Torphy-Corkery', '<NAME> and Tillman', 'Gerlach and Sons',
                                          'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'iterative_sub_string'], 8),
                          ('Schumm PLC', ['Torphy-Corkery', '<NAME>', 'Gerlach and Sons',
                                          'Bank de Nederlandsche'], ['weighted_jaccard', 'overlap', 'bag'], 8)
                          ])
def test_score_matches(to_be_matched, possible_matches, metrics, result):
    """The flattened score matrix has its maximum at the expected position
    for each metric combination."""
    name_match = nm.NameMatcher()
    name_match.set_distance_metrics(metrics)
    assert np.argmax(name_match._score_matches(
        to_be_matched, possible_matches)) == result
@pytest.mark.parametrize("number_of_matches, match_score, metrics, result",
                         [(1, np.array([[0.9, 0.3, 0.5, 0.2, 0.1]]), ['weighted_jaccard'], [0]),
                          (2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]), [
                           'weighted_jaccard', 'discounted_levenshtein'], [0, 1]),
                          (3, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [1, 0.2, 0.3, 0.2, 0.1]]), [
                           'weighted_jaccard', 'discounted_levenshtein', 'iterative_sub_string'], [2, 1, 1]),
                          (2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
                           1, 0.2, 0.3, 0.2, 0.1]]), ['tichy', 'overlap', 'bag'], [2, 1]),
                          (2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5]]), [
                           'overlap', 'bag'], [0, 2]),
                          (1, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
                           1, 0.2, 0.3, 0.2, 0.1]]), ['weighted_jaccard', 'overlap', 'iterative_sub_string'], [1]),
                          (2, np.array([[0.9, 0.3, 0.5, 0.2, 0.1], [0.6, 0.7, 0.8, 0.4, 0.5], [
                           1, 0.2, 0.3, 0.2, 0.1]]), ['weighted_jaccard', 'overlap', 'bag'], [1, 0]),
                          (1, np.array([[0.3, 0.3, 0.8, 0.2, 0.2]]), [
                           'weighted_jaccard'], [0]),
                          (3, np.array([[0.3, 0.3, 0.8, 0.2, 0.2], [0.3, 0.3, 0.8, 0.1, 0.1]]), [
                           'weighted_jaccard', 'discounted_levenshtein'], [0, 1]),
                          (2, np.array([[0.3, 0.3, 0.2, 0.1, 0.02], [0.1, 0.1, 0.2, 0.3, 0.02]]), [
                           'weighted_jaccard', 'iterative_sub_string'], [0, 0]),
                          (1, np.array([[0.3, 0.3, 0.2, 0.1, 0.02], [0.3, 0.3, 0.2, 0.3, 0.02]]), [
                           'overlap', 'iterative_sub_string'], [1]),
                          (1, np.array(
                              [[-0.5, -0.8, -0.3, -0.7, 0, 2]]), ['bag'], [0]),
                          (3, np.array([[10, 8, 7, 6, 12, 15, 14, 88]]), [
                           'weighted_jaccard'], [0]),
                          (2, np.array([[1, 0.3], [0.1, 0.4]]), [
                           'weighted_jaccard', 'discounted_levenshtein'], [0, 1])
                          ])
def test_rate_matches(number_of_matches, match_score, metrics, result):
    """_rate_matches returns at most number_of_matches indices, in the
    expected order, for a given score matrix and metric set.

    (A leftover debug ``print(ind)`` was removed.)
    """
    name_match = nm.NameMatcher()
    name_match._number_of_matches = number_of_matches
    name_match.set_distance_metrics(metrics)
    ind = name_match._rate_matches(match_score)
    assert len(ind) == np.min([number_of_matches, match_score.shape[0]])
    assert list(ind) == result
def test_vectorise_data(name_match):
    """After vectorising, the fitted vectoriser has a non-empty vocabulary."""
    name_match._vectorise_data(transform=False)
    vocabulary = name_match._vec.vocabulary_
    assert len(vocabulary) > 0
@pytest.mark.parametrize("match, number_of_matches, word_set, score, result",
                         [(pd.Series(['Nederandsche', 0, 2, 'De Nederlandsche Bank'], index=['match_name_0', 'score_0', 'match_index_0', 'original_name']), 1, set(['De', 'Bank', 'nl']), 0, 94.553),
                          (pd.Series(['Nederandsche', 0, 2, 'De Nederlandsche Bank'], index=[
                           'match_name_0', 'score_0', 'match_index_0', 'original_name']), 1, set(['komt', 'niet', 'voor']), 0, 69.713),
                          (pd.Series(['nederandsche', 0, 2, 'de nederand bank', 0.4, 3, 'De Nederlandsche Bank'], index=[
                           'match_name_0', 'score_0', 'match_index_0', 'match_name_1', 'score_1', 'match_index_1', 'original_name']), 1, set(['De', 'Bank', 'nl']), 1, 0.4),
                          (pd.Series(['nederandsche', 0, 2, 'de nederand bank', 0.4, 3, 'De Nederlandsche Bank'], index=[
                           'match_name_0', 'score_0', 'match_index_0', 'match_name_1', 'score_1', 'match_index_1', 'original_name']), 1, set(['De', 'Bank', 'nl']), 0, 86.031),
                          ])
def test_postprocess(name_match, match, number_of_matches, word_set, score, result):
    """Post-processing recomputes the requested score fields against
    snapshot values for various word sets."""
    name_match._number_of_matches = number_of_matches
    name_match._word_set = word_set
    new_match = name_match.postprocess(match)
    assert new_match.loc[f'score_{score}'] == pytest.approx(result, 0.0001)
@pytest.mark.parametrize("indicator, punctuations, word_set, cut_off, result_1, result_2",
                         [('legal', False, set(), 0.01, 'plc.', 'bedrijf'),
                          ('legal', True, set(), 0.01, 'plc', 'bedrijf'),
                          ('legal', True, set(['bedrijf']),
                           0.01, 'bedrijf', 'Group'),
                          ('common', True, set(), 0.01, 'Group', 'West'),
                          ('common', True, set(), 0.3, 'and', 'Group'),
                          ('common', True, set(['West']),
                           0.3, 'West', 'bedrijf'),
                          ('someting', True, set(['key']), 0.01, 'key', 'val')
                          ])
def test_make_no_scoring_words(name_match, indicator, punctuations, word_set, cut_off, result_1, result_2):
    """_make_no_scoring_words includes/excludes the expected words for the
    'legal', 'common' and fallback indicators.

    (A leftover debug ``print(new_word_set)`` was removed.)
    """
    name_match._preprocess_punctuations = punctuations
    new_word_set = name_match._make_no_scoring_words(
        indicator, word_set, cut_off)
    assert new_word_set.issuperset(set([result_1]))
    assert not new_word_set.issuperset(set([result_2]))
def test_search_for_possible_matches_error(adjusted_name):
    """Searching before the master data is processed must fail loudly."""
    unprepared_matcher = nm.NameMatcher()
    with pytest.raises(RuntimeError):
        unprepared_matcher._search_for_possible_matches(adjusted_name)
@pytest.mark.parametrize("top_n, low_memory, result_1, result_2",
                         [(10, 0, 1518, 144),
                          (50, 0, 1992, 9),
                          (100, 0, 1999, 6),
                          (1, 0, 44, 144),
                          (10, 8, 1518, 144),
                          (50, 8, 1992, 9),
                          (100, 8, 1999, 6),
                          (1, 8, 44, 144)
                          ])
def test_search_for_possible_matches(name_match, adjusted_name, top_n, low_memory, result_1, result_2):
    """The candidate matrix has top_n columns of valid integer indices and
    matches snapshot extremes, with and without low-memory mode."""
    name_match._column_matching = 'company_name'
    name_match._low_memory = low_memory
    name_match._top_n = top_n
    name_match._process_matching_data(True)
    possible_match = name_match._search_for_possible_matches(adjusted_name)
    assert possible_match.shape[1] == top_n
    assert np.max(possible_match) < len(adjusted_name)
    assert np.all(possible_match.astype(int) == possible_match)
    assert np.max(possible_match[44, :]) == result_1
    assert np.min(possible_match[144, :]) == result_2
@pytest.mark.parametrize("common_words, num_matches, possible_matches, matching_series, result_0, result_1",
                         [(True, 3, np.array([29, 343, 727, 855, 1702]), pd.Series(
                             ['Company and Sons'], index=['company_name']), 36.03, 31.33),
                          (False, 2, np.array([29, 343, 727, ]), pd.Series(
                              ['Company and Sons'], index=['company_name']), 71.28, 68.6),
                          (False, 2, np.array([29, 343]), pd.Series(
                              ['Company and Sons'], index=['company_name']), 71.28, 68.6),
                          (False, 2, np.array([[29, 343], [0, 0]]), pd.Series(
                              ['Company and Sons'], index=['company_name']), 71.28, 68.6),
                          (False, 2, np.array([29, 343, 727, 855, 1702]), pd.Series(
                              ['Company and Sons'], index=['company_name']), 72.28, 71.28)
                          ])
def test_fuzzy_matches(name_match, common_words, num_matches, possible_matches, matching_series, result_0, result_1):
    """fuzzy_matches returns scores matching snapshots and indices drawn
    from the candidate set, with and without common-word postprocessing."""
    name_match._column_matching = 'company_name'
    name_match._number_of_matches = num_matches
    name_match._postprocess_common_words = common_words
    name_match._word_set = set(['Sons', 'and'])
    match = name_match.fuzzy_matches(possible_matches, matching_series)
    assert match['score_0'] == pytest.approx(result_0, 0.0001)
    assert match['score_1'] == pytest.approx(result_1, 0.0001)
    assert match['match_index_0'] in possible_matches
    assert match['match_index_1'] in possible_matches
def test_do_name_matching_full(name_match, adjusted_name):
    """End-to-end matching recovers the original index for most rows."""
    result = name_match.match_names(adjusted_name, 'company_name')
    assert np.sum(result['match_index'] == result.index) == 1922
def test_do_name_matching_split(name_match, adjusted_name):
    """Matching a single row works with the split-preprocessing option."""
    name_match._preprocess_split = True
    result = name_match.match_names(adjusted_name.iloc[44, :], 'company_name')
    assert np.any(result['match_index'] == 44)
def test_do_name_matching_series(name_match, adjusted_name):
    """A single Series (one name) can be matched directly."""
    result = name_match.match_names(adjusted_name.iloc[44, :], 'company_name')
    assert np.any(result['match_index'] == 44)
def test_do_name_matching_error(adjusted_name):
    """Matching without loaded master data raises ValueError."""
    name_match = nm.NameMatcher()
    with pytest.raises(ValueError):
        name_match.match_names(adjusted_name, 'company_name')
@pytest.mark.parametrize("verbose", [True, False])
def test_do_name_matching_print(capfd, name_match, adjusted_name, verbose):
    """Progress messages appear on stdout only in verbose mode."""
    name_match._verbose = verbose
    name_match.match_names(adjusted_name.iloc[:5].copy(), 'company_name')
    out, err = capfd.readouterr()
    if verbose:
        assert out.find('preprocessing') > -1
        assert out.find('searching') > -1
        assert out.find('possible') > -1
        assert out.find('fuzzy') > -1
        assert out.find('done') > -1
    else:
        assert out == ''
@pytest.mark.parametrize("word, occurence_count, result",
                         [['fun snail pool', 2, 'snail'],
                          ['fun snail pool', 3, 'fun snail'],
                          ['fun snail pool', 1, ''],
                          ['fun small pool', 3, 'fun small pool'],
                          ['fun snail', 3, 'fun snail'],
                          ['fun small pool', 5, 'fun small pool']])
def test_select_top_words(word, words, occurence_count, result):
    """_select_top_words keeps words whose corpus frequency is below the
    occurrence-count threshold."""
    word_counts = pd.Series(words).value_counts()
    name_match = nm.NameMatcher()
    new_word = name_match._select_top_words(
        word.split(), word_counts, occurence_count)
    assert new_word == result
@pytest.mark.parametrize("match, num_of_matches, result",
                         [[{'match_name_1': 'fun', 'match_name_2': 'dog',
                            'match_name_0': 'cat'}, 3, ['cat', 'fun', 'dog']],
                          [{'match_name_1': 'fun', 'match_name_2': 'dog',
                            'match_name_0': 'cat'}, 2, ['cat', 'fun']],
                          [{'match_name_1': 'fun', 'match_name_0': 'cat'},
                           2, ['cat', 'fun']],
                          [{'match_name_1': 'fun', 'match_name_2': 'dog', 'match_name_0': 'cat'}, 0, []]])
def test_get_alternative_names(match, num_of_matches, result):
    """_get_alternative_names returns match_name_0..n-1 in index order."""
    name_match = nm.NameMatcher(number_of_matches=num_of_matches)
    res = name_match._get_alternative_names(pd.Series(match))
    assert res == result
@pytest.mark.parametrize("preprocess_punctuations, output, input, x",
                         [[True, '_blame_', {'test': ['fun...', 'done'], 'num':['_.blame._']}, 2],
                          [True, 'done', {'test': ['fun. . . ',
                                                   'done'], 'num':['_.blame._']}, 1],
                          [True, 'fun', {
                              'test': ['fun. . . ', 'done'], 'num':['_.blame._']}, 0],
                          [False, 'fun. . .', {
                              'test': ['fun. . . ', 'done'], 'num':['_.blame._']}, 0],
                          [False, 'fun. . .', {
                              'num': ['_.blame._'], 'test': ['fun. . . ', 'done']}, 1]
                          ])
def test_preprocess_word_list(preprocess_punctuations, output, input, x):
    """_preprocess_word_list flattens the dict of word lists and strips
    punctuation when the punctuation flag is set.

    (A leftover debug ``print(res)`` was removed.)
    """
    name_match = nm.NameMatcher(punctuations=preprocess_punctuations)
    res = name_match._preprocess_word_list(input)
    assert res[x] == output
@pytest.mark.parametrize("num_matches, match_score, match, result, y",
                         [[3, np.array([[1, 1, 1], [1, 1, 1], [0, 0, 0]]), pd.Series(dtype=float), 100, 0],
                          [2, np.array([[1, 1], [0.4, 0.4], [0, 0]]),
                           pd.Series(dtype=float), 40, 1],
                          [1, np.array([[1, 1], [1, 1], [0, 0]]),
                           pd.Series(dtype=float), 100, 0]
                          ])
def test_adjust_scores(num_matches, match_score, match, result, y):
    """_adjust_scores rescales raw scores to the 0-100 range."""
    name_match = nm.NameMatcher(number_of_matches=num_matches)
    match = name_match._adjust_scores(match_score, match)
    assert match[y] == result
@pytest.mark.parametrize("string, stringlist, result_1, result_2, y",
                         [['know sign first', ['know', 'know sign', 'know sign first'], 'know first', 'know first', 2],
                          ['know sign first', ['know', 'know sign',
                                               'know sign first'], 'know first', 'know', 1],
                          ['know sign first', ['know', 'know sign',
                                               'know sign first'], 'know first', 'know', 0],
                          ['know first', ['know', 'know', 'know'],
                           'know first', 'know', 1],
                          ['pool sign small', ['sign small',
                                               'small pool sign', 'small'], '', '', 0],
                          ['pool sign small know', ['sign small',
                                                    'small pool sign', 'small'], 'know', '', 0],
                          ['know pool sign small', ['sign small',
                                                    'small pool sign', 'small'], 'know', '', 0],
                          ['pool sign small', ['sign small',
                                               'small pool know sign', 'small'], '', 'know', 1],
                          ])
def test_process_words(words, string, stringlist, result_1, result_2, y):
    """_process_words strips words from the matcher's word set out of both
    the query string and the candidate strings."""
    name_match = nm.NameMatcher()
    name_match._word_set = set(words)
    string, stringlist = name_match._process_words(string, stringlist)
    assert string == result_1
    assert stringlist[y] == result_2
@pytest.mark.parametrize("word_set, cut_off, result_1, result_2",
                         [[set(), 0, 1518, 'Group'],
                          [set(), 0, 1518, 'and'],
                          [set(), 0.1, 7, 'Group'],
                          [set(), 0.1, 7, 'LLC'],
                          [set(), 0.12, 6, 'LLC'],
                          [set(), 0.2, 1, 'and'],
                          [set(['apple']), 1, 1, 'apple'],
                          [set(['apple']), 0, 1519, 'apple'],
                          [set(['apple']), 0, 1519, 'Group']
                          ])
def test_process_common_words(name_match, word_set, cut_off, result_1, result_2):
    """_process_common_words returns words above the frequency cut-off,
    merged with any words passed in."""
    words = name_match._process_common_words(word_set, cut_off)
    assert result_2 in words
    assert len(words) == result_1
@pytest.mark.parametrize("word_set, preprocess, result_1, result_2",
                         [[set(), True, 244, 'company'],
                          [set(), True, 244, '3ao'],
                          [set(), True, 244, 'gmbh'],
                          [set(), False, 312, '& company'],
                          [set(), False, 312, '3ao'],
                          [set(), False, 312, 'g.m.b.h.'],
                          [set(['apple']), True, 245, 'apple'],
                          [set(['apple']), False, 313, 'apple'],
                          [set(['apple..']), True, 245, 'apple..'],
                          [set(['apple..']), False, 313, 'apple..']
                          ])
def test_process_legal_words(word_set, preprocess, result_1, result_2):
    """_process_legal_words yields the legal-suffix list (punctuation
    stripped when preprocessing is on) merged with extra words."""
    name_match = nm.NameMatcher()
    name_match._preprocess_punctuations = preprocess
    words = name_match._process_legal_words(word_set)
    assert result_2 in words
    assert len(words) == result_1
| StarcoderdataPython |
26447 | from .base import ENDPOINT, process_response
class ReviewsMixin:
    """Mixin adding the review endpoint to an API client.

    Assumes the host class provides ``make_request`` — confirm against the
    client base class.
    """
    @process_response
    def get_review_details(self, review_id, **kwargs):
        """
        GET /review/{review_id}
        """
        url = f"{ENDPOINT}/3/review/{review_id}"
        return self.make_request("GET", url, kwargs)
| StarcoderdataPython |
1715264 | class atl_IDE_webappserver(tornado.web.Application):
class WSBlocksSubHandler(tornado.websocket.WebSocketHandler):
def open(self):
self.get_logger().info('[Server] WSBlocksSub client connected')
self.application._blocks = self
def on_close(self):
self.get_logger().info('[Server] WSBlocksSub client disconnected')
def on_message(self, message):
self.get_logger().info('[Server] WSBlocksSub client message: ' + message)
# echo message back to client
self.write_message(message)
class WSBlocksPubHandler(tornado.websocket.WebSocketHandler):
def open(self):
self.get_logger().info('[Server] WSBlocksSPub Handler client connected')
def on_message(self, message):
try:
if self.application._blocks:
self.application._blocks.write_message(message)
except Exception:
pass
def on_close(self):
self.get_logger().info('[Server] WSConsolePub client disconnected')
    class WSConsoleSubHandler(tornado.websocket.WebSocketHandler):
        """Websocket the console UI subscribes to; cached on the application
        so the matching Pub handler can forward to it."""
        def open(self):
            self.get_logger().info('[Server] WSConsoleSub client connected')
            self.application._console = self
        def on_close(self):
            self.get_logger().info('[Server] WSConsoleSub client disconnected')
        def on_message(self, message):
            self.get_logger().info('[Server] WSConsoleSub client message: ' + message)
            # echo message back to client
            self.write_message(message)
    class WSConsolePubHandler(tornado.websocket.WebSocketHandler):
        """Websocket publishers write console output to; forwarded
        best-effort to the subscribed console client."""
        def open(self):
            self.get_logger().info('[Server] WSConsolePub client connected')
        def on_message(self, message):
            try:
                if self.application._console:
                    self.application._console.write_message(message)
            except Exception:
                # Best-effort forwarding: drop the message on any failure.
                pass
        def on_close(self):
            self.get_logger().info('[Server] WSConsolePub client disconnected')
    # Websocket communication to the Cozmo camera tab so that we can follow what Cozmo sees and does
    class WSCozmo_messagesSubHandler(tornado.websocket.WebSocketHandler):
        """Websocket the Cozmo camera tab subscribes to for robot messages."""
        def open(self):
            self.get_logger().info('[Server] WSCozmo_messagesSub client connected')
            self.application._cozmo_messages = self
        def on_close(self):
            self.get_logger().info('[Server] WSCozmo_messagesSub client disconnected')
        def on_message(self, message):
            self.get_logger().info('[Server] WSCozmo_messagesSub client message: ' + message)
            # echo message back to client
            self.write_message(message)
    class WSCozmo_messagesPubHandler(tornado.websocket.WebSocketHandler):
        """Websocket publishers write robot messages to; forwarded
        best-effort to the subscribed camera-tab client."""
        def open(self):
            self.get_logger().info('[Server] WSCozmo_messagesPub client connected')
        def on_message(self, message):
            try:
                if self.application._cozmo_messages:
                    self.application._cozmo_messages.write_message(message)
            except Exception:
                # Best-effort forwarding: drop the message on any failure.
                pass
        def on_close(self):
            self.get_logger().info('[Server] WSCozmo_messagesPub client disconnected')
    # Websocket communication to the Cozmo 3D tab
    class WS3dSubHandler(tornado.websocket.WebSocketHandler):
        """Websocket the Cozmo 3D tab subscribes to."""
        def open(self):
            self.get_logger().info('[Server] 3dSub client connected')
            self.application._ws3d = self
        def on_close(self):
            self.get_logger().info('[Server] 3dSub client disconnected')
        def on_message(self, message):
            self.get_logger().info('[Server] 3dSub client message: ' + message)
            # echo message back to client
            self.write_message(message)
    class WS3dPubHandler(tornado.websocket.WebSocketHandler):
        """Websocket publishers write 3D-view updates to; forwarded
        best-effort to the subscribed 3D-tab client."""
        def open(self):
            self.get_logger().info('[Server] 3dPub client connected')
        def on_message(self, message):
            try:
                if self.application._ws3d:
                    self.application._ws3d.write_message(message)
            except Exception:
                # Best-effort forwarding: drop the message on any failure.
                pass
        def on_close(self):
            self.get_logger().info('[Server] 3dPub client disconnected')
class WSCameraSubHandler(tornado.websocket.WebSocketHandler):
    """Subscriber endpoint for the camera feed (receive-only)."""

    def open(self):
        self.get_logger().info('[Server] CameraSub client connected')
        # Register this connection so WSCameraPubHandler can push frames to it.
        self.application._wsCamera = self

    def on_close(self):
        self.get_logger().info('[Server] CameraSub client disconnected')
class WSCameraPubHandler(tornado.websocket.WebSocketHandler):
    """Publisher endpoint: forwards binary camera frames to the subscriber."""

    def open(self):
        self.get_logger().info('[Server] CameraPub client connected')

    def on_message(self, message):
        # Camera frames are raw image bytes, hence binary=True.
        try:
            target = self.application._wsCamera
            if target:
                target.write_message(message, binary=True)
        except Exception:
            pass

    def on_close(self):
        self.get_logger().info('[Server] CameraPub client disconnected')
class RobotSubmitHandler(tornado.web.RequestHandler):
    """POST endpoint that receives user code and hands it to the executor."""

    @gen.coroutine
    def post(self):
        raw = self.request.body
        try:
            code = raw.decode('utf-8')
            self.get_logger().info('Received code: ')
            self.get_logger().info(code)
            # Serialize executor access across concurrent submissions.
            with (yield self.application._lock.acquire()):
                self.application._executor.start(code)
            self.write('OK')
        except Exception as e:
            err = str(e)
            raise tornado.web.HTTPError(500, err, reason=err)
class RobotTerminateHandler(tornado.web.RequestHandler):
    """POST endpoint that stops the currently running user code."""

    @gen.coroutine
    def post(self):
        self.get_logger().info('Terminating code')
        # Take the same lock as submission so start/stop cannot interleave.
        with (yield self.application._lock.acquire()):
            self.application._executor.stop()
        self.write('OK')
class NoCacheStaticFileHandler(tornado.web.StaticFileHandler):
    """Static file handler that forbids client-side caching (used in dev mode)."""

    def set_extra_headers(self, path):
        # Disable cache
        self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
class SavesHandler(tornado.web.RequestHandler):
    """Serves and stores Blockly workspace saves as XML files.

    GET  /<folder>/         -> JSON array of save names (without the .xml suffix)
    GET  /<folder>/<file>   -> raw XML content of one save
    PUT  /<folder>/<file>   -> write the request body to <folder>/<file>.xml
    """

    def get(self, folder, file):
        file = file.strip('/')
        if len(file) == 0:
            # Send folder index: every *.xml file, with the extension stripped.
            # (Replaces a non-raw regex '\.xml$' -- an invalid escape sequence
            # in modern Python -- with simple slicing.)
            storedFiles = []
            for filename in os.listdir(folder):
                relativePath = os.path.join(folder, filename)
                if os.path.isfile(relativePath) and filename.endswith('.xml'):
                    storedFiles.append(filename[:-len('.xml')])
            self.get_logger().info("Returning files list: " + str(storedFiles))
            self.write(json.dumps(storedFiles).encode('utf-8'))
            self.set_header('Content-type', 'application/json')
        else:
            # Send only one file; `with` guarantees the handle is closed even
            # if read() or the response write raises.
            with open(os.path.join(folder, file + '.xml'), 'r') as f:
                data = f.read()
            self.write(data)

    def put(self, folder, file):
        file = file.strip('/')
        self.get_logger().info('[Server] SavesHandler: Saving ' + file)
        data = self.request.body
        # `with` guarantees the handle is closed (and data flushed) on error.
        with open(os.path.join(folder, file + '.xml'), 'wb') as f:
            f.write(data)
class ATLHomeHandler(tornado.web.RequestHandler):
    """Serves the ATL Blockly landing page."""

    def initialize(self, args):
        # Parsed command-line options forwarded from start().
        self.args = args

    def get(self, path):
        # Pages live under ../atl-blockly/<path>/index.html.
        self.render('../atl-blockly/' + path + 'index.html')
class HomeHandler(tornado.web.RequestHandler):
    """Serves the main Blockly page, assembling per-robot toolbox fragments.

    Templates are loaded from the cozmo-blockly tree (includes + Cozmo blocks)
    and the thymio-blockly tree (Thymio blocks), then rendered into index.html
    together with the user name and security mode.
    """

    def initialize(self, args):
        # Parsed command-line options forwarded from start().
        self.args = args

    def get(self, path):
        cozmo_blockly_path = '../cozmo-blockly/' + path
        thymio_blockly_path = '../thymio-blockly/' + path
        # One loader per template directory. (The original constructed the
        # cozmo loader twice with identical arguments; a single loader also
        # benefits from tornado's template caching.)
        cozmo_loader = tornado.template.Loader(cozmo_blockly_path, whitespace='all')
        thymio_loader = tornado.template.Loader(thymio_blockly_path, whitespace='all')
        # In dev mode, serve the non-minified includes for easier debugging.
        includes_template = 'includes_debug.template' if self.args.dev else 'includes.template'
        includes = cozmo_loader.load(includes_template).generate()
        # Modularisation of the toolbox depending on the available robots.
        cozmo_blocks = cozmo_loader.load('cozmo_blocks.xml').generate()
        thymio_blocks = thymio_loader.load('thymio_blocks.xml').generate()
        nonsec = 'true' if self.args.nonsecure else 'false'
        self.get_logger().info('[Server] HomeHandler: Loading ' + cozmo_blockly_path + ' and ' + thymio_blockly_path)
        self.render(cozmo_blockly_path + 'index.html', includes=includes, cozmo_blocks=cozmo_blocks, thymio_blocks=thymio_blocks, name=self.args.name, nonsecure=nonsec)
def stop(self):
    """Halt any running user code, then terminate the Tornado IO loop."""
    self._executor.stop()
    tornado.ioloop.IOLoop.instance().stop()
def start(args):
    """Create the CozmoBlockly application, register every route and run it.

    args: parsed command-line namespace (uses dev, nonsecure, nocozmo, aruco,
    name). Blocks inside the Tornado IO loop until stop() is called.
    """
    global cozmoBlockly, nodejs
    app = CozmoBlockly([
        (r'/()', CozmoBlockly.ATLHomeHandler, dict(args=args)),
        (r'/EN/()', CozmoBlockly.HomeHandler, dict(args=args)),
        (r'/FR/()', CozmoBlockly.HomeHandler, dict(args=args)),
        (r'/EN/(.+)', tornado.web.StaticFileHandler if not args.dev else CozmoBlockly.NoCacheStaticFileHandler, dict(path='../cozmo-blockly')),
        (r'/FR/(.+)', tornado.web.StaticFileHandler if not args.dev else CozmoBlockly.NoCacheStaticFileHandler, dict(path='../cozmo-blockly')),
        (r'/static/(.*)', tornado.web.StaticFileHandler if not args.dev else CozmoBlockly.NoCacheStaticFileHandler, dict(path='../gallery')),
        (r'/blockly/(.*)', tornado.web.StaticFileHandler if not args.dev else CozmoBlockly.NoCacheStaticFileHandler, dict(path='../blockly')),
        (r'/thymio-blockly/(.*)', tornado.web.StaticFileHandler if not args.dev else CozmoBlockly.NoCacheStaticFileHandler, dict(path='../thymio-blockly')),
        (r'/atl-blockly/(.*)', tornado.web.StaticFileHandler if not args.dev else CozmoBlockly.NoCacheStaticFileHandler, dict(path='../atl-blockly')),
        (r'/closure-library/(.*)', tornado.web.StaticFileHandler if not args.dev else CozmoBlockly.NoCacheStaticFileHandler, dict(path='../closure-library')),
        (r'/(saves)/(.*)', CozmoBlockly.SavesHandler),
        (r'/robot/submit', CozmoBlockly.RobotSubmitHandler),
        (r'/robot/terminate', CozmoBlockly.RobotTerminateHandler),
        (r'/camSub', CozmoBlockly.WSCameraSubHandler),
        (r'/camPub', CozmoBlockly.WSCameraPubHandler),
        (r'/WsSub', CozmoBlockly.WS3dSubHandler),
        (r'/WsPub', CozmoBlockly.WS3dPubHandler),
        (r'/blocksSub', CozmoBlockly.WSBlocksSubHandler),
        (r'/blocksPub', CozmoBlockly.WSBlocksPubHandler),
        (r'/consoleSub', CozmoBlockly.WSConsoleSubHandler),
        (r'/consolePub', CozmoBlockly.WSConsolePubHandler),
        (r'/cozmo_messagesSub', CozmoBlockly.WSCozmo_messagesSubHandler),
        (r'/cozmo_messagesPub', CozmoBlockly.WSCozmo_messagesPubHandler),
    ])
    cozmoBlockly = app
    if not args.nonsecure:
        try:
            nodejs = Popen(['node', '../nodejs/headless.js'])
        except FileNotFoundError:
            # Fixed: this is a module-level function, so the original
            # self.get_logger() calls here raised NameError; report via print.
            print("node.js wasn't found in your system.")
            print("Secure mode (default) requires node.js. Please install node.js and follow README instructions.")
            return
    print('[Server] Starting server...')
    tornado.platform.asyncio.AsyncIOMainLoop().install()
    if args.dev:
        print('[Server] Running in debug mode')
    app.listen(8080)
    app._executor = CodeExecutor(args.nonsecure, args.nocozmo, args.aruco)
    app._lock = locks.Lock()
    app._wsCamera = None
    app._ws3d = None
    # Fixed: the console and cozmo_messages subscriber slots are read by their
    # Pub handlers; initialize them like the other subscriber attributes.
    app._console = None
    app._cozmo_messages = None
    app._ioloop = tornado.ioloop.IOLoop.current()
    print('[Server] Started, awaiting requests...')
    print('===========================================================================')
    app._ioloop.start()
    print('[Server] Server stopped')
| StarcoderdataPython |
3314081 | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sensor for a robot arm."""
from typing import Dict
from dm_control import mjcf
from dm_control.composer.observation import observable
from dm_robotics.moma import sensor as moma_sensor
from dm_robotics.moma.models.robots.robot_arms import robot_arm
from dm_robotics.moma.sensors import joint_observations
import numpy as np
class RobotArmSensor(moma_sensor.Sensor):
    """Robot arm sensor providing joint-related observations for sim arms."""

    def __init__(self, arm: robot_arm.RobotArm, name: str,
                 have_torque_sensors: bool = True):
        self._arm = arm
        self._name = name
        obs_enum = joint_observations.Observations
        self._observables = {
            self.get_obs_key(obs_enum.JOINT_POS): observable.Generic(self._joint_pos),
            self.get_obs_key(obs_enum.JOINT_VEL): observable.Generic(self._joint_vel),
        }
        if have_torque_sensors:
            torque_key = self.get_obs_key(obs_enum.JOINT_TORQUES)
            self._observables[torque_key] = observable.Generic(self._joint_torques)
        # Every observation produced by this sensor is active by default.
        for observation in self._observables.values():
            observation.enabled = True

    def initialize_episode(self, physics: mjcf.Physics,
                           random_state: np.random.RandomState) -> None:
        """No per-episode state to reset for this sensor."""
        pass

    @property
    def observables(self) -> Dict[str, observable.Observable]:
        """Mapping from observation key to its Observable."""
        return self._observables

    @property
    def name(self) -> str:
        return self._name

    def get_obs_key(self, obs: joint_observations.Observations) -> str:
        """Return the observation key for `obs`, namespaced by this sensor's name."""
        return obs.get_obs_key(self._name)

    def _joint_pos(self, physics: mjcf.Physics) -> np.ndarray:
        return physics.bind(self._arm.joints).qpos  # pytype: disable=attribute-error

    def _joint_vel(self, physics: mjcf.Physics) -> np.ndarray:
        return physics.bind(self._arm.joints).qvel  # pytype: disable=attribute-error

    def _joint_torques(self, physics: mjcf.Physics) -> np.ndarray:
        # sensordata holds 3 values per torque sensor; keep index 2 of each triple.
        return physics.bind(self._arm.joint_torque_sensors).sensordata[2::3]  # pytype: disable=attribute-error
| StarcoderdataPython |
190162 | """Load notebook and cells"""
import argparse
import tarfile
import os
import chardet
from reproducemegit.jupyter_reproducibility import config
from reproducemegit.jupyter_reproducibility import consts
from reproducemegit.jupyter_reproducibility.db import RequirementFile, Repository, connect
from reproducemegit.jupyter_reproducibility.utils import vprint, join_paths, StatusLogger, check_exit, savepid
from reproducemegit.jupyter_reproducibility.utils import find_files_in_path, find_files_in_zip, mount_basedir
from reproducemegit.jupyter_reproducibility.config import Path
def process_requirement_file(session, repository, reqformat, skip_if_error=consts.R_REQUIREMENTS_ERROR):
    """Extract every declared file of `reqformat` from a repository.

    Reads each file either from the repository checkout on disk or, if only
    the archive exists, from the tar archive. Decodable, NUL-free contents are
    stored as RequirementFile rows on `session`. Returns True when all files
    of this format loaded cleanly, False otherwise; on per-file failure the
    repository is flagged with `skip_if_error`.
    """
    MAP = {
        "setup.py": "setup",
        "requirements.txt": "requirement",
        "Pipfile": "pipfile",
        "Pipfile.lock": "pipfile_lock"
    }
    zip_path = None
    tarzip = None
    if not repository.path.exists():
        if not repository.zip_path.exists():
            # Neither checkout nor archive available: mark and give up.
            repository.processed |= consts.R_UNAVAILABLE_FILES
            session.add(repository)
            vprint(1, "Failed to load requirement {} due <repository not found>".format(reqformat))
            return False
        tarzip = tarfile.open(str(repository.zip_path))
        zip_path = Path(repository.hash_dir2)
    finished = True
    req_param = MAP[reqformat] + "_names"
    # try/finally guarantees the tar handle is closed even if an unexpected
    # exception escapes the per-file handling below (the original leaked it).
    try:
        for name in getattr(repository, req_param):
            if not name:
                continue
            try:
                vprint(2, "Loading requirement {}".format(name))
                if tarzip:
                    content = tarzip.extractfile(tarzip.getmember(str(zip_path / name))).read()
                else:
                    with open(str(repository.path / name), "rb") as ofile:
                        content = ofile.read()
                coding = chardet.detect(content)
                if coding["encoding"] is None:
                    vprint(3, "Codec not detected")
                    continue
                try:
                    content = content.decode(coding['encoding'])
                except UnicodeDecodeError:
                    vprint(3, "Invalid codec")
                    continue
                if '\0' in content:
                    # NUL bytes would break text storage in the database.
                    vprint(3, "NULL byte in content")
                    continue
                requirement_file = RequirementFile(
                    repository_id=repository.id,
                    name=name,
                    reqformat=reqformat,
                    content=content,
                    processed=consts.F_OK,
                )
                session.add(requirement_file)
            except Exception as err:
                repository.processed |= skip_if_error
                session.add(repository)
                vprint(1, "Failed to load requirement {} due {!r}".format(name, err))
                if config.VERBOSE > 4:
                    import traceback
                    traceback.print_exc()
                finished = False
    finally:
        if tarzip:
            tarzip.close()
    return finished
def collect_requirements(session, repository):
    """Locate requirement files in a repository and record counts/paths on it."""
    targets = ["setup.py", "requirements.txt", "Pipfile", "Pipfile.lock"]
    changed = False
    if repository.path.exists():
        vprint(2, "using path")
        setups, requirements, pipfiles, pipfile_locks = find_files_in_path(
            repository.path, targets
        )
        changed = True
    elif repository.zip_path.exists():
        vprint(2, "using zip")
        with tarfile.open(str(repository.zip_path)) as tarzip:
            setups, requirements, pipfiles, pipfile_locks = find_files_in_zip(
                tarzip, Path(repository.hash_dir2), targets
            )
        changed = True
    else:
        # Neither checkout nor archive exists: just flag the repository.
        vprint(2, "not found")
        repository.processed |= consts.R_UNAVAILABLE_FILES
    if changed:
        repository.setups_count = len(setups)
        repository.requirements_count = len(requirements)
        repository.pipfiles_count = len(pipfiles)
        repository.pipfile_locks_count = len(pipfile_locks)
        repository.setups = join_paths(setups)
        repository.requirements = join_paths(requirements)
        repository.pipfiles = join_paths(pipfiles)
        repository.pipfile_locks = join_paths(pipfile_locks)
    session.add(repository)
    session.commit()
def process_repository(session, repository, skip_if_error=consts.R_REQUIREMENTS_ERROR):
    """Process repository

    Extracts all four requirement-file formats from the repository and marks
    it R_REQUIREMENTS_OK when every format loaded cleanly.
    """
    # Skip repositories already marked done or already marked with the error flag.
    if repository.processed & (consts.R_REQUIREMENTS_OK + skip_if_error):
        return "already processed"
    # NOTE(review): this branch looks unreachable -- when skip_if_error != 0
    # the check above already returned, and when skip_if_error == 0 the
    # bitwise AND is always 0. The error-flag reset may never run; confirm
    # the intended retry semantics.
    if repository.processed & skip_if_error:
        session.add(repository)
        repository.processed -= skip_if_error
    finished = True
    # Each format is extracted independently; one failure does not stop the rest.
    finished &= process_requirement_file(session, repository, "setup.py", skip_if_error)
    finished &= process_requirement_file(session, repository, "requirements.txt", skip_if_error)
    finished &= process_requirement_file(session, repository, "Pipfile", skip_if_error)
    finished &= process_requirement_file(session, repository, "Pipfile.lock", skip_if_error)
    if finished and not repository.processed & skip_if_error:
        repository.processed |= consts.R_REQUIREMENTS_OK
    session.add(repository)
    return "done"
def apply(
    session, status, selected_repositories, skip_if_error,
    count, interval, reverse, check
):
    """Iterate over repositories still lacking requirement extraction.

    selected_repositories: True to scan the whole table, or a list of ids
        (consumed 30 at a time to keep the SQL IN clause small).
    skip_if_error: bit flag that excludes previously-failed repositories
        (0 when retrying errors).
    count: when truthy, only print how many repositories match and return.
    interval: optional (low, high) inclusive id range filter.
    reverse: process ids in descending instead of ascending order.
    check: names that, when present in the .exit file, abort the loop.
    """
    while selected_repositories:
        # Only repositories not yet processed and not carrying the skip flag.
        filters = [
            Repository.processed.op("&")(consts.R_REQUIREMENTS_OK) == 0,
            Repository.processed.op("&")(skip_if_error) == 0,
        ]
        if selected_repositories is not True:
            # Take the next batch of at most 30 explicit ids.
            filters += [
                Repository.id.in_(selected_repositories[:30])
            ]
            selected_repositories = selected_repositories[30:]
        else:
            # Whole-table mode: make this the only pass through the loop.
            selected_repositories = False
        if interval:
            filters += [
                Repository.id >= interval[0],
                Repository.id <= interval[1],
            ]
        query = session.query(Repository).filter(*filters)
        if count:
            print(query.count())
            return
        if reverse:
            query = query.order_by(
                Repository.id.desc()
            )
        else:
            query = query.order_by(
                Repository.id.asc()
            )
        for repository in query:
            if check_exit(check):
                vprint(0, "Found .exit file. Exiting")
                return
            status.report()
            vprint(0, "Extracting requirement files from {}".format(repository))
            # mount_basedir makes the repository files reachable while processing.
            with mount_basedir():
                result = process_repository(session, repository, skip_if_error)
                vprint(1, result)
            status.count += 1
            session.commit()
def main():
    """Command-line entry point: extract requirement files from repositories."""
    script_name = os.path.basename(__file__)[:-3]
    arg_parser = argparse.ArgumentParser(
        description="Extract requirement files from registered repositories")
    arg_parser.add_argument("-v", "--verbose", type=int, default=config.VERBOSE,
                            help="increase output verbosity")
    arg_parser.add_argument("-n", "--repositories", type=int, default=None,
                            nargs="*",
                            help="repositories ids")
    arg_parser.add_argument("-i", "--interval", type=int, nargs=2,
                            default=config.REPOSITORY_INTERVAL,
                            help="id interval")
    arg_parser.add_argument("-e", "--retry-errors", action='store_true',
                            help="retry errors")
    arg_parser.add_argument("-c", "--count", action='store_true',
                            help="count results")
    arg_parser.add_argument('-r', '--reverse', action='store_true',
                            help='iterate in reverse order')
    arg_parser.add_argument('--check', type=str, nargs='*',
                            default={'all', script_name, script_name + '.py'},
                            help='check name in .exit')
    args = arg_parser.parse_args()
    config.VERBOSE = args.verbose
    # In count-only mode there is no progress to report.
    status = None
    if not args.count:
        status = StatusLogger(script_name)
        status.report()
    with connect() as session, savepid():
        apply(
            session,
            status,
            args.repositories or True,
            0 if args.retry_errors else consts.R_REQUIREMENTS_ERROR,
            args.count,
            args.interval,
            args.reverse,
            set(args.check),
        )
# Allow running this extraction script directly from the command line.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
109202 | <reponame>yasserglez/pytiger2c<filename>packages/pytiger2c/ast/binaryoperatornode.py
# -*- coding: utf-8 -*-
"""
Clase C{BinaryOperatorNode} del árbol de sintáxis abstracta.
"""
from pytiger2c.ast.operatornode import OperatorNode
class BinaryOperatorNode(OperatorNode):
    """
    Abstract syntax tree node for binary operators.

    Base class for operators applied to two expressions; the arithmetic and
    logical operator nodes derive from this class.
    """

    @property
    def left(self):
        """AST node of the expression on the left-hand side of the operator."""
        return self._left

    @property
    def right(self):
        """AST node of the expression on the right-hand side of the operator."""
        return self._right

    def __init__(self, left, right):
        """
        Initialize the C{BinaryOperatorNode}.

        @type left: C{LanguageNode}
        @param left: AST node of the expression to the left of the operator.

        @type right: C{LanguageNode}
        @param right: AST node of the expression to the right of the operator.
        """
        super(BinaryOperatorNode, self).__init__()
        self._left = left
        self._right = right

    def generate_dot(self, generator):
        """
        Generate a Graphviz DOT graph for the abstract syntax tree rooted at
        this node. See C{LanguageNode.generate_dot} for the parameters.
        """
        me = generator.add_node(str(self.__class__.__name__))
        left = self.left.generate_dot(generator)
        right = self.right.generate_dot(generator)
        generator.add_edge(me, left)
        generator.add_edge(me, right)
        return me
| StarcoderdataPython |
3266699 | # DO NOT MODIFY THIS FILE!!!
from unittest import TestCase
from lab_questions import q3_which_number_is_larger as q3
class Q3Test(TestCase):
    """Autograder tests for q3.which_number_is_larger (file must not be modified)."""

    def test_which_number_larger_same_numbers(self):
        # Equal inputs (ints and floats, positive and negative) must yield "same".
        msg = 'When called with %.1f and %.1f, which_number_is_larger() should return "same"'
        self.assertEqual('same', q3.which_number_is_larger(1.5, 1.5), msg=msg % (1.5, 1.5))
        self.assertEqual('same', q3.which_number_is_larger(-6, -6), msg=msg % (-6, -6))
        self.assertEqual('same', q3.which_number_is_larger(3, 3), msg=msg % (3, 3))
        self.assertEqual('same', q3.which_number_is_larger(-1000, -1000), msg=msg % (-1000, -1000))

    def test_which_number_larger_first_larger(self):
        # When the first argument is strictly greater, expect "first".
        msg = 'When called with %.1f and %.1f, which_number_is_larger() should return "first"'
        self.assertEqual('first', q3.which_number_is_larger(10, 2), msg=msg % (10, 2))
        self.assertEqual('first', q3.which_number_is_larger(-1, -100), msg=msg % (-1, -100))
        self.assertEqual('first', q3.which_number_is_larger(1, -100), msg=msg % (1, -100))
        self.assertEqual('first', q3.which_number_is_larger(5.5, 4.4), msg=msg % (5.5, 4.4))

    def test_which_number_larger_second_larger(self):
        # When the second argument is strictly greater, expect "second".
        msg = 'When called with %.1f and %.1f, which_number_is_larger() should return "second"'
        self.assertEqual('second', q3.which_number_is_larger(7, 20), msg=msg % (7, 20))
        self.assertEqual('second', q3.which_number_is_larger(-1, 100), msg=msg % (-1, 100))
        self.assertEqual('second', q3.which_number_is_larger(-100, -10), msg=msg % (-100, -10))
        self.assertEqual('second', q3.which_number_is_larger(20, 55.5), msg=msg % (20, 55.5))
3255381 | <gh_stars>1-10
'''
Copyright (c) Microsoft Open Technologies, Inc. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
Code to read data from a Decagon 10HS moisture sensor connected to a RedBear BLE Nano micro controller, then augment and format as JSON to send via
socket connection to a gateway. Example of sending moisture to Microsoft Azure and analyzing with Azure Stream Analytics or Azure Machine Learning.
Real time output viewable at http://connectthedots.msopentech.com .
'''
# code based on https://github.com/switchdoclabs/iBeacon-Scanner-
DEBUG = False
import os
import sys
import struct
import bluetooth._bluetooth as bluez
# HCI event/command constants -- presumably from the Bluetooth Core
# Specification (Vol 2, Part E); values match the common bluez examples.
# TODO(review): confirm against the spec if these are ever extended.
LE_META_EVENT = 0x3e
LE_PUBLIC_ADDRESS=0x00
LE_RANDOM_ADDRESS=0x01
LE_SET_SCAN_PARAMETERS_CP_SIZE=7
# Opcode group/command fields used with hci_send_cmd below.
OGF_LE_CTL=0x08
OCF_LE_SET_SCAN_PARAMETERS=0x000B
OCF_LE_SET_SCAN_ENABLE=0x000C
OCF_LE_CREATE_CONN=0x000D
LE_ROLE_MASTER = 0x00
LE_ROLE_SLAVE = 0x01
# these are actually sub-events of LE_META_EVENT
EVT_LE_CONN_COMPLETE=0x01
EVT_LE_ADVERTISING_REPORT=0x02
EVT_LE_CONN_UPDATE_COMPLETE=0x03
EVT_LE_READ_REMOTE_USED_FEATURES_COMPLETE=0x04
# Advertisement event types
ADV_IND=0x00
ADV_DIRECT_IND=0x01
ADV_SCAN_IND=0x02
ADV_NONCONN_IND=0x03
ADV_SCAN_RSP=0x04
def eventHandler(macAddress, value):
    # NOTE(review): `f` is not defined anywhere in this file, so calling this
    # function raises NameError. It appears to be leftover scaffolding; the
    # working callback path is BLEMoistureSensor.setSensorDataAvailableEvent.
    f(macAddress,value)
class BLEMoistureSensor:
    """Scans BLE advertisements (via bluez raw HCI) for moisture sensor readings.

    Python 2 code. Looks for advertising packets whose payload carries the
    'MSOT_BLE_Demo:' marker and forwards (MAC address, value) to a user
    callback registered with setSensorDataAvailableEvent().
    """

    # Raw HCI socket to the local Bluetooth adapter (opened in __init__).
    sock = None
    # Optional user callback, invoked as callback(macAddress, value).
    callback = None
    # Index of the local Bluetooth device to open (0 -> hci0).
    dev_id = 0

    def __init__(self) :
        # Open the adapter and enable LE scanning; exit the process on failure.
        try:
            self.sock = bluez.hci_open_dev(self.dev_id)
            old_filter = self.sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
            enable = 1
            cmd_pkt = struct.pack("<BB", enable, 0x00)
            bluez.hci_send_cmd(self.sock, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, cmd_pkt)
        except:
            print "error accessing blue tooth device..."
            sys.exit(1)

    def printpacket(self, pkt):
        # Debug helper: dump each packet byte as two hex digits.
        print "in printpacket"
        for c in pkt:
            sys.stdout.write("%02x " % struct.unpack("B",c)[0])

    def packed_bdaddr_to_string(self, bdaddr_packed):
        # The address arrives little-endian; reverse before formatting as hex.
        return ''.join('%02x'%i for i in struct.unpack("<BBBBBB", bdaddr_packed[::-1]))

    # func( macAddress, value )
    def setSensorDataAvailableEvent(self, func):
        """Register the callback invoked for each matching advertisement."""
        self.callback = func

    def Listen(self):
        """Blocking receive loop: parse HCI events until an error occurs."""
        try:
            old_filter = self.sock.getsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, 14)
            # perform a device inquiry on blue tooth device #0
            # The inquiry should last 8 * 1.28 = 10.24 seconds
            # before the inquiry is performed, bluez should flush its cache of
            # previously discovered devices
            flt = bluez.hci_filter_new()
            bluez.hci_filter_all_events(flt)
            bluez.hci_filter_set_ptype(flt, bluez.HCI_EVENT_PKT)
            self.sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, flt )
            while True:
                pkt = self.sock.recv(255)
                # HCI event header: packet type, event code, parameter length.
                ptype, event, plen = struct.unpack("BBB", pkt[:3])
                if event == bluez.EVT_INQUIRY_RESULT_WITH_RSSI:
                    i =0
                elif event == bluez.EVT_NUM_COMP_PKTS:
                    i =0
                elif event == bluez.EVT_DISCONN_COMPLETE:
                    i =0
                elif event == LE_META_EVENT:
                    subevent, = struct.unpack("B", pkt[3])
                    pkt = pkt[4:]
                    if subevent == EVT_LE_CONN_COMPLETE:
                        # NOTE(review): le_handle_connection_complete is not
                        # defined in this file -- this path raises NameError
                        # if a connection-complete event ever arrives.
                        le_handle_connection_complete(pkt)
                    elif subevent == EVT_LE_ADVERTISING_REPORT:
                        #print "advertising report"
                        num_reports = struct.unpack("B", pkt[0])[0]
                        # NOTE(review): report_pkt_offset is never advanced, so
                        # only the first report of a multi-report packet is read.
                        report_pkt_offset = 0
                        for i in range(0, num_reports):
                            if (DEBUG == True):
                                print "-------------"
                                print "\t", "full packet: ", self.printpacket(pkt)
                                print "\t", "MAC address: ", self.packed_bdaddr_to_string(pkt[report_pkt_offset + 3:report_pkt_offset + 9])
                            # build the return string
                            # (`id` shadows the builtin here; kept as-is.)
                            id = pkt[report_pkt_offset +12: report_pkt_offset +26]
                            if (DEBUG == True):
                                print "\t", "id: ", id
                            if (id == 'MSOT_BLE_Demo:'):
                                # MAC address
                                macAddress = self.packed_bdaddr_to_string(pkt[report_pkt_offset + 3:report_pkt_offset + 9])
                                # string representation of Water Volume Content (unit-less) floating point value
                                value = pkt[report_pkt_offset + 26: report_pkt_offset + 36]
                                if (DEBUG == True):
                                    print "\t", "address=", macAddress, " value=", value
                                if( self.callback != None ):
                                    print "calling event handler"
                                    self.callback( macAddress, value )
        except:
            # Restore the socket filter captured above, then exit the process.
            self.sock.setsockopt( bluez.SOL_HCI, bluez.HCI_FILTER, old_filter )
            print "error in BLE Listen loop"
            sys.exit(1)
| StarcoderdataPython |
1604815 | <gh_stars>0
from django.test import TestCase
from django.contrib.auth.models import User
from .models import Profile,Post,Comment,Location,Category,Neighborhood,Business
# Tests
class ProfileTestClass(TestCase):
    """Unit tests for the Profile model."""

    # Set up method
    def setUp(self):
        self.user = User.objects.create_user(username='testuser', password='<PASSWORD>')
        self.profile = Profile(id=1,first_name='John',last_name='Doe',user = self.user,bio='test bio')

    #Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.profile,Profile))
class PostTestClass(TestCase):
    """Unit tests for the Post model."""

    # Set up method
    def setUp(self):
        self.user = User.objects.create_user(username='testuser', password='<PASSWORD>')
        # NOTE(review): the body field is passed as `post=` here but as
        # `content=` in CommentTestClass.setUp -- one of the two must not
        # match the Post model; confirm the actual field name.
        self.post = Post(id=1,title='Test',post='This is a test',user = self.user)

    #Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.post,Post))
class CommentTestClass(TestCase):
    """Unit tests for the Comment model."""

    # Set up method
    def setUp(self):
        self.user = User.objects.create_user(username='testuser', password='<PASSWORD>')
        # NOTE(review): the body field is passed as `content=` here but as
        # `post=` in PostTestClass.setUp -- confirm the Post model's field name.
        self.post = Post(id=1,title='Test',content='This is a test',user = self.user)
        self.comment = Comment(id=1,post=self.post,user=self.user)

    #Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.comment,Comment))
class LocationTestClass(TestCase):
    """Unit tests for the Location model."""

    def setUp(self):
        # A minimal Location instance is enough for the type check below.
        self.location = Location(id=1, name='Test name')

    def test_instance(self):
        self.assertTrue(isinstance(self.location, Location))
class CategoryTestClass(TestCase):
    """Unit tests for the Category model."""

    def setUp(self):
        # A minimal Category instance is enough for the type check below.
        self.category = Category(id=1, name='Test name')

    def test_instance(self):
        self.assertTrue(isinstance(self.category, Category))
class NeighbourhoodTestClass(TestCase):
    """Unit tests for the Neighbourhood model.

    NOTE(review): the import at the top of this file spells the model
    ``Neighborhood`` while this class references ``Neighbourhood`` everywhere
    -- one of the two spellings must be wrong (NameError at import/run time).
    Confirm the model's name and align the import or these references.
    """

    # Setup method
    def setUp(self):
        self.user = User.objects.create_user(username='testuser', password='<PASSWORD>')
        self.location = Location(id=1, name='Test name')
        self.neighbourhood = Neighbourhood(id=1, name='Test name', location=self.location, admin=self.user, occupants=1)

    # Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.neighbourhood, Neighbourhood))

    def test_create_neighbourhood(self):
        self.location.save()
        self.neighbourhood.create_neighbourhood()
        self.assertTrue(len(Neighbourhood.objects.all()) > 0)

    def test_delete_neighborhood(self):
        self.location.save()
        self.neighbourhood.create_neighbourhood()
        self.neighbourhood = Neighbourhood.objects.get(id=1)
        self.neighbourhood.delete_neighbourhood()
        self.assertTrue(len(Neighbourhood.objects.all()) == 0)

    def test_find_neighborhood(self):
        self.location.save()
        # Fixed: was create_neighborhood() -- every other test in this class
        # calls create_neighbourhood().
        self.neighbourhood.create_neighbourhood()
        self.searched_neighbourhood = Neighbourhood.find_neighbourhood(1)
        # Fixed: the original read back a typoed attribute
        # (searched_neighbuorhood), which raised AttributeError.
        self.assertTrue(self.searched_neighbourhood == self.neighbourhood)

    def test_update_neighborhood(self):
        self.location.save()
        self.neighbourhood.create_neighbourhood()
        self.neighbourhood = Neighbourhood.objects.get(id=1)
        self.neighbourhood.name = 'Changed name'
        self.neighbourhood.update_neighbourhood()
        self.updated_neighbourhood = Neighbourhood.objects.get(id=1)
        self.assertEqual(self.updated_neighbourhood.name, 'Changed name')

    def test_update_occupants(self):
        self.location.save()
        self.neighbourhood.create_neighbourhood()
        self.neighbourhood = Neighbourhood.objects.get(id=1)
        self.neighbourhood.update_occupants()
        self.updated_neighbourhood = Neighbourhood.objects.get(id=1)
        self.assertTrue(self.updated_neighbourhood.occupants == 2)
class BusinessTestClass(TestCase):
    """Unit tests for the Business model."""

    # Setup method
    def setUp(self):
        self.user = User.objects.create_user(username='testuser', password='<PASSWORD>')
        self.location = Location(id=1, name='Test name')
        self.neighbourhood = Neighbourhood(id=1, name='Test name', location=self.location, admin=self.user, occupants=1)
        # Fixed: self.category was referenced below but never created (raising
        # AttributeError); build it the same way CategoryTestClass does.
        self.category = Category(id=1, name='Test name')
        # Fixed: removed a dead first assignment to self.business that was
        # immediately overwritten by this full constructor call.
        self.business = Business(id=1, name='Test', user=self.user, description='Test description', neighbourhood=self.neighbourhood, category=self.category, email='<EMAIL>')
        self.location.save()
        self.neighbourhood.save()
        self.category.save()
        self.business.save()

    # Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.business, Business))

    def test_create_business(self):
        self.business.create_business()
        self.assertTrue(len(Business.objects.all()) > 0)

    def test_delete_business(self):
        self.business.delete_business()
        self.assertTrue(len(Business.objects.all()) == 0)

    def test_find_business(self):
        self.business = Business.find_business(1)
        self.assertEqual(self.business.id, 1)

    def test_update_business(self):
        self.business = Business.find_business(1)
        self.business.name = 'Changed name'
        self.business.update_business()
        self.updated_business = Business.find_business(1)
        self.assertEqual(self.updated_business.name, 'Changed name')
195639 | <gh_stars>0
import tkinter as tk
from PIL import ImageTk, Image
import numpy as np
import json
import matplotlib.pyplot as plt
from double_edges import choose_double_edges
from euler_path import euler_path
from path_variables import data_folder
# Pixel size used when drawing node markers -- presumably a radius/diameter
# for draw_node(); confirm against draw_node's implementation.
CIRCLE_SIZE = 10
# Canvas line width used to highlight doubled edges.
DOUBLE_EDGE_WIDTH = 5
BACKGROUND_FILENAME = data_folder / "map.png"
EDGES_FILENAME = data_folder / "edges.json"
# Real-world width represented by the map image; units not stated here --
# presumably kilometres. Pick the line matching the loaded map.
ACTUAL_WIDTH = 5.6 # <NAME> & Red hill & Pinnacle
# ACTUAL_WIDTH = 2.8 # Campbell, <NAME>
# ACTUAL_WIDTH = 1.4 # <NAME>
class Radpath:
# We need this to be a class so that we can access the canvas from outside functions
def __init__(self):
    """Build the full-screen map editor window and start the Tk main loop."""
    root = tk.Tk()
    # With this current system, you need to enter full screen mode for it to work properly
    screen_width = root.winfo_screenwidth()
    screen_height = root.winfo_screenheight()
    background_image = Image.open(BACKGROUND_FILENAME)
    background_image = self.rescale_background(background_image, screen_width, screen_height)
    # Keep a reference on self so Tk does not garbage-collect the photo image.
    self.background = ImageTk.PhotoImage(background_image)
    self.canvas = tk.Canvas(root, width=screen_width, height=screen_height)
    self.canvas.grid()
    # Mouse press/drag/release implement node placement and edge drawing.
    self.canvas.bind('<ButtonPress-1>', self.mouse_press)
    self.canvas.bind('<B1-Motion>', self.mouse_drag)
    self.canvas.bind('<ButtonRelease-1>', self.mouse_release)
    # Enter triggers route calculation (calculate_route, defined later).
    self.canvas.bind('<KeyPress-Return>', self.calculate_route)
    self.canvas.focus_set()
    self.canvas.create_image(0, 0, image=self.background, anchor='nw')
    # Graph model: node coordinates, edges, and their canvas item handles.
    self.nodes = []
    self.node_drawings = []
    self.edges = []
    self.edge_drawings = []
    self.number_drawings = []
    self.double_edges = []
    # Interaction state for the current press/drag gesture.
    self.last_press = None
    self.last_line = None
    self.new_node = True
    self.preload_edges()
    root.mainloop()
def rescale_background(self, background_image, screen_width, screen_height):
    """Scale the image to fill the screen as much as possible without distortion."""
    screen_ratio = screen_width / screen_height
    image_ratio = background_image.width / background_image.height
    # Fit along whichever dimension the image is proportionally larger,
    # preserving the original aspect ratio.
    if image_ratio >= screen_ratio:
        new_width = screen_width
        new_height = new_width / image_ratio
    else:
        new_height = screen_height
        new_width = new_height * image_ratio
    return background_image.resize((int(new_width), int(new_height)), Image.ANTIALIAS)
def preload_edges(self):
    """Load a previously saved edge set from EDGES_FILENAME and draw it."""
    try:
        with open(EDGES_FILENAME, 'r') as file:
            self.edges = json.load(file)
        # Nodes need to be tuples for dictionary hashing to work
        self.edges = [[tuple(edge[0]), tuple(edge[1])] for edge in self.edges]
    except (OSError, ValueError, TypeError):
        # Narrowed from a bare `except:`: a missing/unreadable file (OSError),
        # malformed JSON (json.JSONDecodeError is a ValueError) or wrongly
        # shaped data (TypeError) falls back to starting from scratch, while
        # genuine programming errors now propagate instead of being swallowed.
        print("There is no edges.json file for preloading, so we are starting from scratch")
        return
    # Draw the edges
    for edge in self.edges:
        line = self.canvas.create_line(edge[0][0], edge[0][1], edge[1][0], edge[1][1])
        self.edge_drawings.append(line)
    # Extract and draw the unique nodes referenced by the edges
    nodes = set()
    for edge in self.edges:
        nodes.add(edge[0])
        nodes.add(edge[1])
    self.nodes = list(nodes)
    for node in self.nodes:
        circle = self.draw_node(node)
        self.node_drawings.append(circle)
def mouse_press(self, event):
"""If we press somewhere that doesn't yet have a node, then place a node there"""
node = (event.x, event.y)
self.new_node = self.overlapping_node(node) is None
if self.new_node:
circle = self.draw_node(node)
self.nodes.append(node)
self.node_drawings.append(circle)
else:
node = self.overlapping_node(node)
self.last_press = node
def mouse_drag(self, event):
"""If we press and drag, then draw a line from the last press position to the current mouse position"""
self.canvas.delete(self.last_line)
self.last_line = self.canvas.create_line(self.last_press[0], self.last_press[1], event.x, event.y)
    def mouse_release(self, event):
        """Create and delete a node and or edge based on this logic:
        1. if the 1st and 2nd nodes are the same and the 1st node is new then ignore it, otherwise delete it
        2. if the second node is new then create and draw it, if not then centre it
        3. if the edge is old then delete it, if not then create and draw it"""
        node1 = self.last_press
        node2 = (event.x, event.y)
        # Snap the release position to an existing node, if any.
        node2_centred = self.overlapping_node(node2)
        # 1. if the 1st and 2nd nodes are the same and the 1st node is new then ignore it, otherwise delete it
        if node1 == node2_centred:
            if self.new_node:
                self.canvas.delete(self.last_line)
                return
            # Don't delete a node if it's connected to an edge
            edge_nodes = {node for edge in self.edges for node in edge}
            if node1 in edge_nodes:
                self.canvas.delete(self.last_line)
                return
            else:
                # nodes and node_drawings are parallel lists, so the same
                # index removes both the data and its canvas drawing.
                index = self.nodes.index(node1)
                del self.nodes[index]
                self.canvas.delete(self.node_drawings[index])
                del self.node_drawings[index]
                self.canvas.delete(self.last_line)
                return
        # 2. if the second node is new then create and draw it, if not then centre it
        if node2_centred is None:
            circle = self.draw_node(node2)
            self.nodes.append(node2)
            self.node_drawings.append(circle)
        else:
            node2 = node2_centred
        # 3. if the edge is old then delete it, if not then create and draw it
        # Edges are undirected, so check both orientations before deciding.
        edge = [node1,node2]
        reverse_edge = [node2, node1]
        if edge in self.edges or reverse_edge in self.edges:
            edge = edge if edge in self.edges else reverse_edge
            index = self.edges.index(edge)
            del self.edges[index]
            self.canvas.delete(self.edge_drawings[index])
            del self.edge_drawings[index]
        else:
            line = self.canvas.create_line(edge[0][0], edge[0][1], edge[1][0], edge[1][1])
            self.edges.append(edge)
            self.edge_drawings.append(line)
        # Always remove the temporary rubber-band line from mouse_drag.
        self.canvas.delete(self.last_line)
def overlapping_node(self, node):
"""returns the centre coordinates of the node that overlaps, or None if none overlap"""
for old_node in self.nodes:
euclidian_distance = np.sqrt(np.square(node[0] - old_node[0]) + np.square(node[1] - old_node[1]))
if euclidian_distance < CIRCLE_SIZE:
return old_node
return None
def draw_node(self, node):
"""Draw the node centred at the coordinate"""
return self.canvas.create_oval(node[0] - CIRCLE_SIZE / 2, node[1] - CIRCLE_SIZE / 2, node[0] + CIRCLE_SIZE / 2,
node[1] + CIRCLE_SIZE / 2)
    # Need to handle exceptions, e.g. empty graph
    def calculate_route(self, event):
        """Make the edges that need to be repeated get drawn in bold"""
        if self.edges == []:
            print("Cannot calculate route for an empty network")
            return
        # Clear the labels drawn by a previous route calculation.
        for number_drawing in self.number_drawings:
            self.canvas.delete(number_drawing)
        # Route-inspection step: pick which edges must be traversed twice.
        self.double_edges = choose_double_edges(self.edges)
        if self.double_edges is None:
            print("Cannot generate route if the graph is disjoint")
            return
        self.path, self.colours = euler_path(self.edges, self.double_edges)
        # Build a hex palette from tab20, reordered so adjacent path
        # segments get visually distinct colours.
        colour_map = plt.get_cmap('tab20').colors
        rainbow = colour_map[6:8] + colour_map[2:6] + colour_map[0:2] + colour_map[8:]
        colour_ints = [[int(c*255) for c in colour] for colour in rainbow]
        colour_hex = ["#" + ''.join('%02x'%i for i in colour) for colour in colour_ints]
        # Draw each edge with its assigned colour
        used_edges = set()
        for i, edge in enumerate(self.path):
            # Offset each drawn segment perpendicular to the edge so the
            # second traversal of a doubled edge lands on the other side.
            gradient = np.array(edge[1]) - np.array(edge[0])
            unit_vector = gradient / np.linalg.norm(gradient)
            rotation_matrix = [[0, 1], [-1, 0]]
            if edge in used_edges:
                # Second pass over this edge: rotate the other way.
                rotation_matrix = [[0, -1], [1, 0]]
            new_vector = np.dot(rotation_matrix, unit_vector)
            dist = 3
            x_change = dist * new_vector[0]
            y_change = dist * new_vector[1]
            self.canvas.create_line(edge[0][0] + x_change,
                                    edge[0][1] + y_change,
                                    edge[1][0] + x_change,
                                    edge[1][1] + y_change,
                                    width=DOUBLE_EDGE_WIDTH,
                                    fill=colour_hex[self.colours[i]])
            used_edges.add(edge)
        # Redraw the plain edges/nodes on top of the coloured route.
        self.preload_edges()
        # Calculate the total length of the path
        route_length = total_length(self.path, self.background.width(), ACTUAL_WIDTH)
        print(f"Total length is about {round(route_length)}km, based on the 'ACTUAL_WIDTH'")
        # Save the edges to file
        with open(EDGES_FILENAME, 'w') as file:
            json.dump(self.edges, file)
def total_length(edges, window_width, real_life_width):
    """Sum the Euclidean lengths of ``edges`` (pixel coordinates) and
    scale the result from window pixels to real-world units."""
    pixel_length = sum(
        np.linalg.norm(np.array(start) - np.array(end))  # Euclidean distance
        for start, end in edges
    )
    scaling_factor = real_life_width / window_width
    return pixel_length * scaling_factor
if __name__ == '__main__':
Radpath() | StarcoderdataPython |
112958 | <filename>catfood.py
import getpass
import smtplib
from email.mime.text import MIMEText
from email.utils import formataddr
import urllib.request, urllib.parse, urllib.error
import ssl
import json
import time
import re
import os
import sys
# Email setting for notification
def Email(sender, password, recipient, emailsub, emailmsg, smtpsever, smtpport):
    """Send a plain-text notification e-mail via SMTP over SSL.

    :return: True on success, False on any failure. (The original fell
        through and returned None on failure; the caller only worked
        because it compared the result with ``== True``.)
    """
    try:
        msg = MIMEText(emailmsg, 'plain', 'utf-8')
        msg['From'] = formataddr(('Catfood Reminder', sender))
        msg['To'] = formataddr((recipient, recipient))
        msg['Subject'] = emailsub
        server = smtplib.SMTP_SSL(smtpsever, smtpport)
        server.login(sender, password)
        server.sendmail(sender, [recipient], msg.as_string())
        server.quit()
        print('Succeed to send e-mail')
        return True
    except Exception as error:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt); report the cause to ease SMTP debugging.
        print('Failed to send e-mail: {}'.format(error))
        return False
def MacOsNotification(ostitle, osmsg):
    """Show a macOS notification via AppleScript; no-op on other platforms."""
    if sys.platform == 'darwin':
        import subprocess
        # Pass the script as a separate argv element instead of
        # interpolating user text into an os.system() shell command line
        # (the original broke, and allowed injection, on quotes/metachars).
        script = ('display notification "{}" sound name "default" '
                  'with title "{}"').format(osmsg.replace('"', '\\"'),
                                            ostitle.replace('"', '\\"'))
        subprocess.run(['osascript', '-e', script])
def GetDobanTopic(keywords):
    """Poll the Douban group API for topics matching ``keywords`` (regex),
    remember already-seen topics in record.json, and notify (stdout,
    macOS notification, e-mail) when new matches appear.

    NOTE(review): relies on module-level globals SenderAddress, Password,
    RecipientAddress, SMTPSever and SMTPPort being set by the setup loop
    at the bottom of this script before it is called.
    """
    # Load saved topic data
    try:
        with open('record.json', 'r') as record_file:
            record = json.load(record_file)
            record_topics = record['topics']
            lasttime = record['time']
            # NOTE(review): close() inside the `with` is redundant.
            record_file.close()
    except:
        # NOTE(review): bare except — also hides JSON/schema errors, not
        # just a missing file; defaults below start a fresh record.
        record = dict()
        record_topics = dict()
        lasttime = "2020-01-01 00:00:00"
    # Write new topic data
    with open('record.json', 'w') as record_file:
        # Request 1000pcs of topics from Douban
        info = []
        for i in range(0, 10):
            # Ignore SSL certificate errors
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
            # Request data in JSON format
            count = 100
            start = i * count
            url = 'https://api.douban.com/v2/group/656297/topics?start=' + str(start) + '&count=' + str(count)
            header = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36 Edg/79.0.309.56'}
            req = urllib.request.Request(url = url, headers = header)
            nowtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            try:
                data = json.loads(urllib.request.urlopen(req, context = ctx).read())
            except:
                # Skip this page on any network/parse failure.
                continue
            # Filtrate concerned topics
            for number in range(0, count):
                topic = data['topics'][number]
                content = topic['title'] + topic ['content']
                if topic['updated'] <= lasttime:
                    # Topics are ordered by update time; stop at old ones.
                    break
                if re.search(keywords, content, re.I|re.M|re.S) != None:
                    if topic['id'] not in record_topics.keys():
                        info.append(topic['updated'] + '\r\n' + topic['title'] + '\r\n' + topic['share_url'] + '\r\n' + '-' * 50)
                        print(topic['updated'] + '\n' + topic['title'] + '\n' + topic['share_url'] + '\n' + '-' * 50)
                        record_topics[topic['id']] = {'updated':topic['updated'], 'title':topic['title'], 'link':topic['share_url']}
            if number < (count - 1):
                # Inner loop ended early (reached old topics): stop paging.
                break
        record['time'] = nowtime
        record['topics'] = record_topics
        json.dump(record, record_file, ensure_ascii = False)
        if len(info) == 0:
            print('No new message ' + nowtime)
        else:
            message = str(len(info)) + ' new message(s) ' + nowtime
            print(message)
            MacOsNotification('Catfood Reminder', message)
            Email(SenderAddress, Password, RecipientAddress, message, "\r\n".join(info), SMTPSever, SMTPPort)
        # NOTE(review): redundant close() inside the `with` block.
        record_file.close()
    return
#Setup e-mail
# Interactively collect and verify SMTP credentials; loop until a test
# message can actually be sent.
while True:
    # Login in E-mail
    SenderAddress = input('Please input the sender\'s e-mail address: ')
    Password = getpass.getpass('Please input the sender\'s e-mail password: ')
    SMTPSever = input('Please input the sender\'s e-mail SMTP Sever address: ')
    SMTPPort = input('Please input the sender\'s e-mail SMTP Port: ')
    RecipientAddress = input('Please input the recipient\'s e-mail address: ')
    #Test E-mail
    testemail = Email(SenderAddress, Password, RecipientAddress, 'TEST MESSAGE', 'THIS IS TEST TEXT', SMTPSever, SMTPPort)
    if testemail == True:
        print('Valid e-mail setting, start searching...')
        break
    else:
        # NOTE(review): message wording is garbled ("setting is invalid"
        # twice) — runtime string left untouched here.
        print('Invalid e-mail setting is invalid, please retry')
# Search new topic every 10 min
while True:
    GetDobanTopic(r'(开车).*?(go)') #change into your target keywords
    print('Next search will start in 10 min')
    time.sleep(600)
| StarcoderdataPython |
3396523 | <reponame>salvatorecalo/nebula8<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright SquirrelNetwork
from core import decorators
from core.handlers.logs import sys_loggers, telegram_loggers
from core.utilities.functions import (
ban_user_reply,
ban_user_by_username,
ban_user_by_id,
bot_object,
delete_message_reply)
from core.utilities.message import message
from core.utilities.strings import Strings
from core.utilities.monads import (Given,Try)
from languages.getLang import languages
def ban_error(update, context, username = None, id = None):
    """Tell the chat that banning the given user (by name or id) failed."""
    target = username if username is not None else id
    languages(update, context)
    message(update, context, languages.ban_user_error % target)
def ban_success(update, context, chat, username = None, id = None):
    """Announce a successful ban in the chat and forward it to the log channel."""
    languages(update, context)
    target = username if username is not None else id
    message(update, context, languages.user_ban % target)
    logs_text = "<b>#Log User Banned!</b>\nGroup: {}\nUser: {}".format(chat.title, username or id)
    telegram_loggers(update, context, logs_text)
@decorators.admin.user_admin
@decorators.delete.init
def init(update, context):
    """Handle the /ban command.

    Bans either the author of the replied-to message, or the user named
    in the command argument (``@username`` or a numeric id). Successes
    and failures are reported in-chat and forwarded to the log channel.
    """
    languages(update,context)
    bot = bot_object(update,context)
    chat = update.effective_chat
    reply = update.message.reply_to_message
    if reply is not None:
        if reply.from_user.id == bot.id:
            # Refuse to ban the bot itself.
            message(update,context,languages.bot_ban)
        else:
            ban_text = languages.ban_message.format(
                user = reply.from_user.username or reply.from_user.first_name,
                userid = reply.from_user.id,
                chat = chat.title
            )
            logs_text = Strings.BAN_LOG.format(
                username = reply.from_user.username or reply.from_user.first_name,
                id = reply.from_user.id,
                chat = chat.title
            )
            delete_message_reply(update,context)
            ban_user_reply(update,context)
            message(update,context,ban_text)
            telegram_loggers(update,context,logs_text)
            formatter = "Ban eseguito da: {} nella chat {}".format(
                update.message.from_user.id,
                chat.title)
            sys_loggers("[BAN_LOGS]",formatter,False,True)
    else:
        # Strip the "/ban " prefix to get the target argument.
        ban_argument = update.message.text[5:]
        # Guard: "/ban" with no argument previously crashed with an
        # IndexError on ban_argument[0] below.
        if not ban_argument:
            message(update,context,languages.ban_error.format(ban_argument))
            return
        is_user_id = Try.of(lambda: int(ban_argument)).valueOf() is not None
        if ban_argument.startswith('@'):
            username = ban_argument
            Try.of(lambda: ban_user_by_username(update, context, username)) \
                .catch(lambda err: ban_error(update, context, username = username)) \
                .map(lambda x : ban_success(update, context, chat, username = username))
        elif is_user_id:
            userid = ban_argument
            Try.of(lambda: ban_user_by_id(update, context, userid)) \
                .catch(lambda err: ban_error(update, context, id = userid)) \
                .map(lambda x : ban_success(update, context, chat, id = userid))
        else:
            message(update,context,languages.ban_error.format(ban_argument))
    return
92519 | <gh_stars>0
#!/usr/bin/env python
import pika
from kafka import KafkaConsumer
import json
import utm
import requests
import os
from xml.dom import minidom
import datetime
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='traffic_sign', exchange_type='topic')
consumer = KafkaConsumer('anomalies')
# Pick the map file for the requested experiment scenario; fail fast with
# usage instructions when the env var is missing or unrecognised.
experiment_to_run = os.getenv("EXPERIMENT_SCENARIO", None)
if (experiment_to_run == "VALIDATION"):
    map_path = "../../validation/inputs/hex_map.xml"
elif (experiment_to_run == "CITY_SCALE"):
    map_path = "./map_complete.xml"
else:
    raise Exception("""
    To use this script, you should override the
    EXPERIMENT_SCENARIO environment variable with
    values "VALIDATION" or "CITY_SCALE"; i.e:
    $ export EXPERIMENT_SCENARIO=VALIDATION
    """)
def load_nodes():
    """Parse every <node> element of the map XML into [id, x, y] triples."""
    node_elements = minidom.parse(map_path).getElementsByTagName('node')
    return [
        [int(node.getAttribute('id')),
         float(node.getAttribute('x')),
         float(node.getAttribute('y'))]
        for node in node_elements
    ]
def load_edges():
    """Parse every <link> element of the map XML into {id: [from, to]}."""
    link_elements = minidom.parse(map_path).getElementsByTagName('link')
    return {
        int(link.getAttribute('id')): [int(link.getAttribute('from')),
                                       int(link.getAttribute('to'))]
        for link in link_elements
    }
print("Loading map nodes...")
nodes = {}
for u in load_nodes():
nodes[u[0]] = [u[1], u[2]]
print("Map nodes loaded...")
print("Loading map edges...")
edges = load_edges()
print("Map edges loaded...")
def from_xy_to_latlon(x, y):
    """Convert UTM zone 19S (EPSG:32719) coordinates to WGS84 [lat, lon]
    via the epsg.io transformation web service."""
    url = ("https://epsg.io/trans?data={0},{1}&s_srs=32719&t_srs=4326"
           .format(x, y))
    first_result = requests.get(url).json()[0]
    return [float(first_result["y"]), float(first_result["x"])]
def from_latlon_to_xy(lat, lon):
    """Convert WGS84 lat/lon to UTM zone 19S (EPSG:32719) [x, y]
    via the epsg.io transformation web service."""
    url = ("https://epsg.io/trans?data={0},{1}&s_srs=4326&t_srs=32719"
           .format(lat, lon))
    first_result = requests.get(url).json()[0]
    return [float(first_result["x"]), float(first_result["y"])]
print("Ready to transfer data from Kafka to RabbitMQ...")
import csv
# Main bridge loop: for each anomaly from Kafka, look up nearby traffic
# boards via the discovery service, publish both edge directions to the
# RabbitMQ exchange, and append a timing record to round.csv.
with open('round.csv', 'a', newline='') as fil:
    # NOTE(review): mywriter is never used; rows are written with
    # fil.write() below instead of the csv writer.
    mywriter = csv.writer(fil)
    for msg in consumer:
        print("Anomaly detected...")
        payload = msg.value
        anomaly = json.loads(payload)
        edge_id = anomaly.get("edgeId", None)
        edge_id = int(edge_id)
        measure_id = anomaly.get("measure_id", None)
        timestamp = anomaly.get("timestamp", None)
        # Map the anomalous edge to its endpoint node coordinates.
        [from_id, to_id] = edges[edge_id]
        coordinates = nodes[from_id]
        host = "http://localhost:8000/discovery/"
        endpoint = ("resources?capability=traffic_board&lat={0}&lon={1}&radius=1000"
                    .format(coordinates[0], coordinates[1]))
        resp = requests.get(host + endpoint)
        resources = json.loads(resp.text)["resources"]
        for r in resources:
            # The board's id is stored in the resource description field.
            board_id = r.get("description")
            if (board_id == None):
                raise Exception("""
                Your board resources are incorrect. In their description
                you must have their ids.
                """)
            # Publish both directions of the affected edge.
            message = "%s.%s.%s" % (board_id, from_id, to_id)
            channel.basic_publish(exchange='traffic_sign',
                                  routing_key='#',
                                  body=message)
            message = "%s.%s.%s" % (board_id, to_id, from_id)
            channel.basic_publish(exchange='traffic_sign',
                                  routing_key='#',
                                  body=message)
        new_timestamp = datetime.datetime.utcnow().isoformat()
        print("writing...")
        fil.write("{0};{1};{2}\n".format(measure_id, timestamp, new_timestamp))
| StarcoderdataPython |
3300825 | import snekspec.core as s
import hypothesis.strategies as hst
import hypothesis as h
def _spec():
    """Build the example artist spec used by the tests below: a keys-spec
    requiring 'first' (anything), 'last' (string), 'ratings' (collection
    of floats > 0.6) and 'career_span' (pair of ints)."""
    rating_spec = s.is_float()
    # A "good" rating is a float that is also strictly greater than 0.6.
    good_rating_spec = s.and_(rating_spec,
                              s.PredSpec(lambda x: x > 0.6))
    return s.keys({'first': s.is_any(),
                   'last': s.is_string(),
                   'ratings': s.coll_of(good_rating_spec),
                   'career_span': s.tuple_(s.is_int(), s.is_int())})
class TestExamples:
    """Example-based tests: explain() returns an empty problem list and
    is_valid() is True only when the object satisfies _spec()."""
    def test_valid_obj(self):
        obj = {'first': 'Kamaal',
               'last': 'Fareed',
               'ratings': [0.9, 0.7, 0.9],
               'career_span': (1990, 2019)}
        assert [] == s.explain(_spec(), obj)
        assert s.is_valid(_spec(), obj)
    def test_missing_keys(self):
        # Only 'first' is supplied; the other required keys are absent.
        obj = {'first': 'Q-Tip'}
        assert [] != s.explain(_spec(), obj)
        assert not s.is_valid(_spec(), obj)
    def test_invalid_value(self):
        # 'last' must be a string, not an int.
        obj = {'first': 'KRS',
               'last': 1,
               'ratings': [0.8, 0.7, 0.9]}
        assert [] != s.explain(_spec(), obj)
        assert not s.is_valid(_spec(), obj)
    def test_invalid_tuple_value(self):
        # Second element of career_span must be an int.
        obj = {'first': 'Kamaal',
               'last': 'Fareed',
               'ratings': [0.9, 0.7, 0.9],
               'career_span': (1990, '*')}
        assert [] != s.explain(_spec(), obj)
        assert not s.is_valid(_spec(), obj)
    def test_invalid_tuple_size(self):
        # career_span requires exactly two elements.
        obj = {'first': 'Kamaal',
               'last': 'Fareed',
               'ratings': [0.9, 0.7, 0.9],
               'career_span': (1990, )}
        assert [] != s.explain(_spec(), obj)
        assert not s.is_valid(_spec(), obj)
    def test_invalid_nested_value(self):
        # The 0.5 rating fails the > 0.6 predicate inside coll_of.
        obj = {'first': 'KRS',
               'last': '1',
               'ratings': [0.99, 0.7, 0.8, 0.5]}
        assert [] != s.explain(_spec(), obj)
        assert not s.is_valid(_spec(), obj)
    def test_invalid_none(self):
        obj = None
        assert [] != s.explain(_spec(), obj)
        assert not s.is_valid(_spec(), obj)
    def test_none_with_nilable(self):
        # Wrapping the spec in nilable() makes None acceptable.
        obj = None
        spec = s.nilable(_spec())
        assert [] == s.explain(spec, obj)
        assert s.is_valid(spec, obj)
class TestStrategyGeneratesValidValue:
    """Property tests: every spec's Hypothesis strategy should only
    generate values that the spec itself validates."""
    @h.settings(deadline=1000.0)
    @h.given(hst.data())
    def test_keys(self, data):
        spec = s.keys({'a': s.is_string(),
                       'b': s.nilable(s.is_string())})
        val = data.draw(spec.strategy())
        assert s.is_valid(spec, val)
    @h.given(hst.data())
    def test_coll_of(self, data):
        spec = s.coll_of(s.nilable(s.is_string()))
        val = data.draw(spec.strategy())
        assert s.is_valid(spec, val)
    @h.given(hst.data())
    def test_is_int(self, data):
        spec = s.and_(s.is_int())
        val = data.draw(spec.strategy())
        assert s.is_valid(spec, val)
    @h.given(hst.data())
    def test_and_of_bounded_ints(self, data):
        # Renamed: this method was also called `test_is_int`, so it
        # shadowed the previous test and that one never ran.
        spec = s.and_(s.is_int({'min_value': 0,
                                'max_value': 50}),
                      s.is_int({'min_value': 50,
                                'max_value': 100}))
        val = data.draw(spec.strategy())
        assert s.is_valid(spec, val)
| StarcoderdataPython |
86187 | """DISTRO IGNITER CLI
Usage:
pycli start
pycli -h|--help
pycli -v|--version
Options:
start Starts the CLI
-h --help Display the available commands
-v --version Display CLI version
"""
from __future__ import print_function, unicode_literals
from docopt import docopt
import os
import glob
from PyInquirer import style_from_dict, Token, prompt
import subprocess
__version__ = 'v0.0.1'
feature_options = [
{
"name": "Python",
"checked": True,
"script": "python_installation.sh"
},
{
"name": "MySQL",
"checked": True,
"script": "mysql_installation.sh"
},
{
"name": "Redis",
"checked": True,
"script": "redis_installation.sh"
},
{
"name": "Snap Packages",
"checked": True,
"script": "snap_packages_installation.sh"
},
{
"name": "Docker",
"checked": True,
"script": "docker_installation.sh"
},
{
"name": "Node.JS",
"checked": True,
"script": "nodejs_installation.sh"
},
{
"name": "VSCode Extensions",
"checked": True,
"script": "vscode_extensions_installation.sh"
},
{
"name": "Theme",
"checked": True,
"script": "style_installation.sh"
},
]
checkbox_input = [
{
'type': 'checkbox',
'message': 'Select features to install',
'name': 'features',
'choices': feature_options,
'validate': lambda answer: 'You must choose at least one feature.'
if len(answer) == 0 else True
}
]
# Styling the CLI
checkbox_input_style = style_from_dict({
    Token.Separator: '#cc5454',
    Token.QuestionMark: '#673ab7 bold',
    Token.Selected: '#cc5454', # default
    Token.Pointer: '#673ab7 bold',
    Token.Instruction: '', # default
    # The original line was corrupted ("<PASSWORD>'," is not valid
    # Python, likely an anonymization artifact). Restored with a
    # plausible answer colour. TODO(review): confirm intended value.
    Token.Answer: '#f44336 bold',
    Token.Question: '',
})
def start():
    """Prompt for the features to install and run each one's shell script."""
    # prompt() returns {} when the user aborts, hence the .get default.
    answers = prompt(checkbox_input, style=checkbox_input_style).get(
        'features', [])
    # Keep the canonical option dicts (they carry the script names) for
    # every ticked feature. The original enumerate() index was unused.
    features = [feature for feature in feature_options
                if feature.get("name") in answers]
    for feature in features:
        script_url = './scripts/' + feature.get('script', '')
        subprocess.call(script_url, shell=True)
if __name__ == '__main__':
    # Parse the docopt usage string at the top of this module.
    arguments = docopt(__doc__, version=__version__)
    if arguments['start']:
        start()
    else:
        # Any other invocation: show what docopt parsed.
        print(arguments)
| StarcoderdataPython |
120523 | <filename>flasky.py
import os
from app import create_app, db
from app.models import User,Cult,Rating,Post
from flask_migrate import Migrate
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
migrate = Migrate(app, db)
@app.shell_context_processor
def make_shell_context():
    """Expose the db handle and the models automatically in `flask shell`."""
    return {'db': db, 'User': User, 'Cult': Cult, 'Post': Post, 'Rating': Rating}
| StarcoderdataPython |
1601847 | from abstract_factory import CommandHandler
import random, string
class User:
    """
    To instantiate and store users.
    Cool things you can do:
    - Add a different neo4j url_location for each user
    - each user has their own command handler
    """
    def __init__(self, name,
                 id: str = None,
                 loc: str = 'Bangalore',
                 command_handler = CommandHandler) -> None:
        # Each user owns a private command-handler instance.
        self.name = name
        self.id = self.generate_id(id)
        self.loc = loc
        self.command_handler = command_handler()
    def generate_id(self, id):
        """Return ``id`` unchanged, or a fresh random 12-digit string when None."""
        if id is not None:
            return id
        return "".join(random.choices(string.digits, k=12))
| StarcoderdataPython |
170709 | <gh_stars>0
def test_client_can_get_avatars(client):
    # The avatar listing endpoint is public: no auth token required.
    resp = client.get('/api/avatars')
    assert resp.status_code == 200
def test_client_gets_correct_avatars_fields(client):
    # The listing envelope carries offset/total/data, total matches the
    # data length, and each item exposes exactly the documented fields.
    resp = client.get('/api/avatars')
    assert 'offset' in resp.json
    assert resp.json['offset'] is None
    assert 'total' in resp.json
    assert 'data' in resp.json
    assert resp.json['total'] == len(resp.json['data'])
    assert {
        'avatar_id',
        'category',
        'uri',
        'created_at',
        'updated_at',
    } == set(resp.json['data'][0].keys())
def test_client_filters_avatars_fields(client):
    # `fields=` restricts each returned item to exactly those keys.
    resp = client.get('/api/avatars?fields=category,created_at')
    avatars = resp.json['data']
    assert {
        'category',
        'created_at',
    } == set(avatars[0].keys())
def test_client_offsets_avatars(client):
    # offset=2 should drop up to two items from the front of the listing.
    resp_1 = client.get('/api/avatars')
    resp_2 = client.get('/api/avatars?offset=2')
    assert len(resp_1.json['data']) \
        == len(resp_2.json['data']) + min(2, len(resp_1.json['data']))
def test_client_limits_avatars(client):
    # max_n_results caps the number of returned items.
    resp_1 = client.get('/api/avatars?max_n_results=1')
    resp_2 = client.get('/api/avatars?max_n_results=2')
    assert len(resp_1.json['data']) <= 1
    assert len(resp_2.json['data']) <= 2
def test_logged_off_client_cannot_create_avatar(client):
    # Anonymous clients must get 401 on avatar creation.
    resp = client.post('/api/avatars',
        data={
            'uri': 'http://newavatars.com/img.png',
            'category': 'dummy',
        }
    )
    assert resp.status_code == 401
def test_logged_in_user_cannot_create_avatar(client_with_tok):
    # Regular authenticated users are still forbidden (admin-only action).
    resp = client_with_tok.post('/api/avatars',
        data={
            'uri': 'http://newavatars.com/img.png',
            'category': 'dummy',
        }
    )
    assert resp.status_code == 401
def test_logged_in_mod_cannot_create_avatar(mod_with_tok):
    # Moderators are also forbidden (admin-only action).
    resp = mod_with_tok.post('/api/avatars',
        data={
            'uri': 'http://newavatars.com/img.png',
            'category': 'dummy',
        }
    )
    assert resp.status_code == 401
def test_logged_in_admin_can_create_avatar(admin_with_tok):
    # Admins may create avatars.
    resp = admin_with_tok.post('/api/avatars',
        data={
            'uri': 'http://newavatars.com/img.png',
            'category': 'dummy',
        }
    )
    assert resp.status_code == 200
def test_logged_in_admin_gets_correct_data_on_avatar_creation(admin_with_tok):
    # Renamed from ..._on_user_creation: this test creates an avatar, not
    # a user; the old name was misleading.
    resp = admin_with_tok.post('/api/avatars',
        data={
            'uri': 'http://newavatars.com/img.png',
            'category': 'dummy',
        }
    )
    assert 'data' in resp.json
    assert resp.json['data']['uri'] == 'http://newavatars.com/img.png'
    assert resp.json['data']['category'] == 'dummy'
def test_client_can_get_avatar(client, avatar_id):
    # Fetching a single avatar by id is public.
    resp = client.get('/api/avatars/{}'.format(avatar_id))
    assert resp.status_code == 200
    assert 'data' in resp.json
def test_client_gets_correct_avatar_fields(client, avatar_id):
    # A single-avatar response exposes exactly the documented fields.
    resp = client.get('/api/avatars/{}'.format(avatar_id))
    assert 'data' in resp.json
    assert {
        'avatar_id',
        'category',
        'uri',
        'created_at',
        'updated_at',
    } == set(resp.json['data'].keys())
def test_logged_off_client_cannot_edit_avatar(client, avatar_id):
    # Anonymous clients must get 401 on avatar edits.
    resp = client.put('/api/avatars/{}'.format(avatar_id),
        data={
            'uri': 'http://newavatars.com/newimg.png',
        }
    )
    assert resp.status_code == 401
def test_logged_in_user_cannot_edit_avatar(client_with_tok, avatar_id):
    # Regular users are forbidden from editing avatars.
    resp = client_with_tok.put('/api/avatars/{}'.format(avatar_id),
        data={
            'uri': 'http://newavatars.com/newimg.png',
        }
    )
    assert resp.status_code == 401
def test_logged_in_mod_cannot_edit_avatar(mod_with_tok, avatar_id):
    # Moderators are forbidden from editing avatars.
    resp = mod_with_tok.put('/api/avatars/{}'.format(avatar_id),
        data={
            'uri': 'http://newavatars.com/newimg.png',
        }
    )
    assert resp.status_code == 401
def test_logged_in_admin_can_edit_avatar(admin_with_tok, avatar_id):
    # Admins may edit avatars.
    resp = admin_with_tok.put('/api/avatars/{}'.format(avatar_id),
        data={
            'uri': 'http://newavatars.com/img.png',
            'category': 'dummy',
        }
    )
    assert resp.status_code == 200
def test_logged_in_admin_gets_correct_put_fields(admin_with_tok, avatar_id):
    # A successful PUT echoes the full avatar record back.
    resp = admin_with_tok.put('/api/avatars/{}'.format(avatar_id),
        data={
            'category': 'newcategory',
        }
    )
    assert 'data' in resp.json
    assert {
        'avatar_id',
        'category',
        'uri',
        'created_at',
        'updated_at',
    } == set(resp.json['data'].keys())
def test_logged_in_admin_correctly_edits_avatar(admin_with_tok, avatar_id):
    # Renamed: fixed the "corretly" typo in the discovered test name.
    # Edits must actually persist: read, modify, then read back.
    resp_1 = admin_with_tok.get('/api/avatars/{}'.format(avatar_id))
    resp_2 = admin_with_tok.put('/api/avatars/{}'.format(avatar_id),
        data={
            'category': resp_1.json['data']['category'] + '_altered',
            'uri': resp_1.json['data']['uri'] + '.png',
        }
    )
    resp_3 = admin_with_tok.get('/api/avatars/{}'.format(avatar_id))
    assert resp_1.status_code == 200
    assert resp_2.status_code == 200
    assert resp_3.status_code == 200
    assert resp_3.json['data']['category'] \
        == resp_1.json['data']['category'] + '_altered'
    assert resp_3.json['data']['uri'] \
        == resp_1.json['data']['uri'] + '.png'
def test_logged_off_client_cannot_delete_avatar(client, avatar_id):
    # Anonymous clients must get 401 on avatar deletion.
    resp = client.delete('/api/avatars/{}'.format(avatar_id))
    assert resp.status_code == 401
def test_logged_in_user_cannot_delete_avatar(client_with_tok, avatar_id):
    # Regular users are forbidden from deleting avatars.
    resp = client_with_tok.delete('/api/avatars/{}'.format(avatar_id))
    assert resp.status_code == 401
def test_logged_in_mod_cannot_delete_avatar(mod_with_tok, avatar_id):
    # Moderators are forbidden from deleting avatars.
    resp = mod_with_tok.delete('/api/avatars/{}'.format(avatar_id))
    assert resp.status_code == 401
def test_logged_in_admin_can_delete_avatar(admin_with_tok, avatar_id):
    # Admins may delete avatars; success is 204 No Content.
    resp = admin_with_tok.delete('/api/avatars/{}'.format(avatar_id))
    assert resp.status_code == 204
def test_logged_in_admin_correctly_deletes_avatar(admin_with_tok, avatar_id):
    # Renamed: fixed the "corretly" typo in the discovered test name.
    # Deletion must persist: the avatar is gone (404) after the DELETE.
    resp_1 = admin_with_tok.get('/api/avatars/{}'.format(avatar_id))
    resp_2 = admin_with_tok.delete('/api/avatars/{}'.format(avatar_id))
    resp_3 = admin_with_tok.get('/api/avatars/{}'.format(avatar_id))
    assert resp_1.status_code == 200
    assert resp_2.status_code == 204
    assert resp_3.status_code == 404
| StarcoderdataPython |
195408 | <filename>tests/test_arwn_collect.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_arwn
----------------------------------
Tests for `arwn` module.
"""
import sys
import mock
import testtools
import unittest
from arwn.cmd import collect
from . import arwn_fixtures
class TestArwnCollect(testtools.TestCase):
    """Smoke test for `arwn collect` startup in foreground mode."""
    @mock.patch('arwn.cmd.collect.event_loop')
    def test_start_in_foreground(self, evloop):
        # Renamed: fixed the "forground" typo in the test method name.
        cfg = arwn_fixtures.SampleConfig()
        stdout = arwn_fixtures.CaptureStdout()
        self.useFixture(stdout)
        self.useFixture(cfg)
        # Simulate `collect -f -c <config>` on the command line.
        testargs = ["collect", "-f", "-c", cfg.path]
        with mock.patch.object(sys, 'argv', testargs):
            collect.main()
        self.assertIn("[DEBUG] root: Starting arwn in foreground",
                      str(stdout))
        self.assertTrue(evloop.called, "Eventloop not called")
if __name__ == '__main__':
sys.exit(unittest.main())
| StarcoderdataPython |
3315428 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Measurement smoke test to make sure that no new action_name_to_run is
defined."""
import os
import optparse
import logging
import unittest
from measurements import rasterize_and_record_micro
from telemetry import benchmark as benchmark_module
from telemetry.core import discover
from telemetry.page import page_test
from telemetry.unittest_util import options_for_unittests
from telemetry.util import classes
from telemetry.web_perf import timeline_based_measurement
# Do NOT add new items to this list!
# crbug.com/418375
_ACTION_NAMES_WHITE_LIST = (
'RunPageInteractions',
)
def _GetAllPossiblePageTestInstances():
  """Return page-test instances from both measurement classes and
  benchmark definitions, for smoke-testing their attributes."""
  page_test_instances = []
  measurements_dir = os.path.dirname(__file__)
  top_level_dir = os.path.dirname(measurements_dir)
  benchmarks_dir = os.path.join(top_level_dir, 'benchmarks')
  # Get all page test instances from measurement classes that are directly
  # constructable
  all_measurement_classes = discover.DiscoverClasses(
      measurements_dir, top_level_dir, page_test.PageTest).values()
  for measurement_class in all_measurement_classes:
    if classes.IsDirectlyConstructable(measurement_class):
      page_test_instances.append(measurement_class())
  all_benchmarks_classes = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark).values()
  # Get all page test instances from defined benchmarks.
  # Note: since this depends on the command line options, there is no guaranteed
  # that this will generate all possible page test instances but it's worth
  # enough for smoke test purpose.
  for benchmark_class in all_benchmarks_classes:
    # Build the option set each benchmark expects before instantiating
    # its page test.
    options = options_for_unittests.GetCopy()
    parser = optparse.OptionParser()
    benchmark_class.AddCommandLineArgs(parser)
    benchmark_module.AddCommandLineArgs(parser)
    benchmark_class.SetArgumentDefaults(parser)
    options.MergeDefaultValues(parser.get_default_values())
    pt = benchmark_class().CreatePageTest(options)
    # Timeline-based measurements have no action_name_to_run; skip them.
    if not isinstance(pt, timeline_based_measurement.TimelineBasedMeasurement):
      page_test_instances.append(pt)
  return page_test_instances
class MeasurementSmokeTest(unittest.TestCase):
  """Checks that no page test introduces a new action_name_to_run value
  outside the frozen white list (crbug.com/418375)."""
  # TODO(nednguyen): Remove this test when crbug.com/418375 is marked fixed.
  def testNoNewActionNameToRunUsed(self):
    invalid_tests = []
    for test in _GetAllPossiblePageTestInstances():
      if isinstance(test, rasterize_and_record_micro.RasterizeAndRecordMicro):
        continue
      if not hasattr(test, 'action_name_to_run'):
        invalid_tests.append(test)
        logging.error('Test %s missing action_name_to_run attribute.',
                      test.__class__.__name__)
      # Changed `if` to `elif`: when the attribute is missing, the old
      # code still dereferenced test.action_name_to_run on the next line
      # and crashed with AttributeError instead of reporting the test.
      elif test.action_name_to_run not in _ACTION_NAMES_WHITE_LIST:
        invalid_tests.append(test)
        logging.error('Page test %s has invalid action_name_to_run: %s' %
                      (test.__class__.__name__, repr(test.action_name_to_run)))
    self.assertFalse(
        invalid_tests,
        'New page tests with invalid action_name_to_run found. Please only use '
        'action_name_to_run="RunPageInteractions" (crbug.com/418375).')
| StarcoderdataPython |
1681486 | <filename>bloodhound_search/bhsearch/tests/whoosh_backend.py
# -*- coding: UTF-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
import os
import shutil
import tempfile
from trac.util.datefmt import FixedOffset, utc
from bhsearch.api import ASC, DESC, SCORE, SortInstruction
from bhsearch.query_parser import DefaultQueryParser
from bhsearch.tests import unittest
from bhsearch.tests.base import BaseBloodhoundSearchTest
from bhsearch.whoosh_backend import WhooshBackend
from whoosh import index, query, sorting
from whoosh.fields import ID, KEYWORD, TEXT, Schema
from whoosh.qparser import MultifieldParser, MultifieldPlugin, PhrasePlugin, \
QueryParser, WhitespacePlugin
class WhooshBackendTestCase(BaseBloodhoundSearchTest):
    def setUp(self):
        # Rebuild the Whoosh index from scratch for every test so results
        # never depend on previous test cases.
        super(WhooshBackendTestCase, self).setUp()
        self.whoosh_backend = WhooshBackend(self.env)
        self.whoosh_backend.recreate_index()
        self.parser = DefaultQueryParser(self.env)
    def test_can_retrieve_docs(self):
        # Two indexed docs come back, sorted ascending by id, with the
        # synthesized unique_id and the sort position exposed as `score`.
        self.whoosh_backend.add_doc(dict(id="1", type="ticket"))
        self.whoosh_backend.add_doc(dict(id="2", type="ticket"))
        result = self.whoosh_backend.query(
            query.Every(),
            sort = [SortInstruction("id", ASC)],
        )
        self.print_result(result)
        self.assertEqual(2, result.hits)
        docs = result.docs
        self.assertEqual(
            {'id': u'1', 'type': u'ticket', 'unique_id': u'ticket:1',
             'score': 0},
            docs[0])
        self.assertEqual(
            {'id': u'2', 'type': u'ticket', 'unique_id': u'ticket:2',
             'score': 1},
            docs[1])
def test_can_return_all_fields(self):
self.whoosh_backend.add_doc(dict(id="1", type="ticket"))
result = self.whoosh_backend.query(query.Every())
self.print_result(result)
docs = result.docs
self.assertEqual(
{'id': u'1', 'type': u'ticket', 'unique_id': u'ticket:1',
"score": 1.0},
docs[0])
def test_can_select_fields(self):
self.whoosh_backend.add_doc(dict(id="1", type="ticket"))
result = self.whoosh_backend.query(query.Every(),
fields=("id", "type"))
self.print_result(result)
docs = result.docs
self.assertEqual(
{'id': '1', 'type': 'ticket'},
docs[0])
def test_can_survive_after_restart(self):
self.whoosh_backend.add_doc(dict(id="1", type="ticket"))
whoosh_backend2 = WhooshBackend(self.env)
whoosh_backend2.add_doc(dict(id="2", type="ticket"))
result = whoosh_backend2.query(query.Every())
self.assertEqual(2, result.hits)
def test_can_apply_multiple_sort_conditions_asc(self):
self.whoosh_backend.add_doc(dict(id="2", type="ticket2"))
self.whoosh_backend.add_doc(dict(id="3", type="ticket1"))
self.whoosh_backend.add_doc(dict(id="4", type="ticket3"))
self.whoosh_backend.add_doc(dict(id="1", type="ticket1"))
result = self.whoosh_backend.query(
query.Every(),
sort = [SortInstruction("type", ASC), SortInstruction("id", ASC)],
fields=("id", "type"),
)
self.print_result(result)
self.assertEqual([{'type': 'ticket1', 'id': '1'},
{'type': 'ticket1', 'id': '3'},
{'type': 'ticket2', 'id': '2'},
{'type': 'ticket3', 'id': '4'}],
result.docs)
def test_can_apply_multiple_sort_conditions_desc(self):
self.whoosh_backend.add_doc(dict(id="2", type="ticket2"))
self.whoosh_backend.add_doc(dict(id="3", type="ticket1"))
self.whoosh_backend.add_doc(dict(id="4", type="ticket3"))
self.whoosh_backend.add_doc(dict(id="1", type="ticket1"))
result = self.whoosh_backend.query(
query.Every(),
sort = [SortInstruction("type", ASC), SortInstruction("id", DESC)],
fields=("id", "type"),
)
self.print_result(result)
self.assertEqual([{'type': 'ticket1', 'id': '3'},
{'type': 'ticket1', 'id': '1'},
{'type': 'ticket2', 'id': '2'},
{'type': 'ticket3', 'id': '4'}],
result.docs)
def test_can_sort_by_score_and_date(self):
the_first_date = datetime(2012, 12, 1)
the_second_date = datetime(2012, 12, 2)
the_third_date = datetime(2012, 12, 3)
exact_match_string = "texttofind"
not_exact_match_string = "texttofind bla"
self.whoosh_backend.add_doc(dict(
id="1",
type="ticket",
summary=not_exact_match_string,
time=the_first_date,
))
self.whoosh_backend.add_doc(dict(
id="2",
type="ticket",
summary=exact_match_string,
time=the_second_date,
))
self.whoosh_backend.add_doc(dict(
id="3",
type="ticket",
summary=not_exact_match_string,
time=the_third_date,
))
self.whoosh_backend.add_doc(dict(
id="4",
type="ticket",
summary="some text out of search scope",
time=the_third_date,
))
parsed_query = self.parser.parse("summary:texttofind")
result = self.whoosh_backend.query(
parsed_query,
sort = [
SortInstruction(SCORE, ASC),
SortInstruction("time", DESC)
],
)
self.print_result(result)
self.assertEqual(3, result.hits)
docs = result.docs
#must be found first, because the highest score (of exact match)
self.assertEqual("2", docs[0]["id"])
#must be found second, because the time order DESC
self.assertEqual("3", docs[1]["id"])
#must be found third, because the time order DESC
self.assertEqual("1", docs[2]["id"])
def test_can_do_facet_count(self):
self.whoosh_backend.add_doc(dict(id="1", type="ticket", product="A"))
self.whoosh_backend.add_doc(dict(id="2", type="ticket", product="B"))
self.whoosh_backend.add_doc(dict(id="3", type="wiki", product="A"))
result = self.whoosh_backend.query(
query.Every(),
sort = [SortInstruction("type", ASC), SortInstruction("id", DESC)],
fields=("id", "type"),
facets= ("type", "product")
)
self.print_result(result)
self.assertEqual(3, result.hits)
facets = result.facets
self.assertEqual({"ticket":2, "wiki":1}, facets["type"])
self.assertEqual({"A":2, "B":1}, facets["product"])
def test_can_do_facet_if_filed_missing_TODO(self):
self.whoosh_backend.add_doc(dict(id="1", type="ticket"))
self.whoosh_backend.add_doc(dict(id="2", type="ticket", status="New"))
result = self.whoosh_backend.query(
query.Every(),
facets= ("type", "status")
)
self.print_result(result)
self.assertEqual(2, result.hits)
facets = result.facets
self.assertEqual({"ticket":2}, facets["type"])
self.assertEqual({None: 1, 'New': 1}, facets["status"])
def test_can_return_empty_result(self):
result = self.whoosh_backend.query(
query.Every(),
sort = [SortInstruction("type", ASC), SortInstruction("id", DESC)],
fields=("id", "type"),
facets= ("type", "product")
)
self.print_result(result)
self.assertEqual(0, result.hits)
def test_can_search_time_with_utc_tzinfo(self):
time = datetime(2012, 12, 13, 11, 8, 34, 711957,
tzinfo=FixedOffset(0, 'UTC'))
self.whoosh_backend.add_doc(dict(id="1", type="ticket", time=time))
result = self.whoosh_backend.query(query.Every())
self.print_result(result)
self.assertEqual(time, result.docs[0]["time"])
def test_can_search_time_without_tzinfo(self):
time = datetime(2012, 12, 13, 11, 8, 34, 711957, tzinfo=None)
self.whoosh_backend.add_doc(dict(id="1", type="ticket", time=time))
result = self.whoosh_backend.query(query.Every())
self.print_result(result)
self.assertEqual(time.replace(tzinfo=utc), result.docs[0]["time"])
def test_can_search_time_with_non_utc_tzinfo(self):
hours = 8
tz_diff = 1
time = datetime(2012, 12, 13, 11, hours, 34, 711957,
tzinfo=FixedOffset(tz_diff, "just_one_timezone"))
self.whoosh_backend.add_doc(dict(id="1", type="ticket", time=time))
result = self.whoosh_backend.query(query.Every())
self.print_result(result)
self.assertEqual(datetime(2012, 12, 13, 11, hours-tz_diff, 34, 711957,
tzinfo=utc), result.docs[0]["time"])
def test_can_apply_filter_and_facet(self):
self.whoosh_backend.add_doc(dict(id="1", type="ticket"))
self.whoosh_backend.add_doc(dict(id="2", type="wiki" ))
result = self.whoosh_backend.query(
query.Every(),
filter=query.Term("type", "ticket"),
facets=["type"]
)
self.print_result(result)
self.assertEqual(1, result.hits)
self.assertEqual("ticket", result.docs[0]["type"])
@unittest.skip("TODO clarify behavior on Whoosh mail list")
def test_can_search_id_and_summary_TODO(self):
#arrange
self.insert_ticket("test x")
self.insert_ticket("test 1")
fieldboosts = dict(
id = 1,
summary = 1,
)
mfp = MultifieldPlugin(list(fieldboosts.keys()),)
pins = [WhitespacePlugin,
PhrasePlugin,
mfp]
parser = QueryParser(None, WhooshBackend.SCHEMA, plugins=pins)
parsed_query = parser.parse("1")
result = self.whoosh_backend.query(parsed_query)
self.print_result(result)
self.assertEqual(2, result.hits)
def test_no_index_error_when_counting_facet_on_missing_field(self):
"""
Whoosh 2.4.1 raises "IndexError: list index out of range"
when search contains facets on field that is missing in at least one
document in the index. The error manifests only when index contains
more than one segment
Introduced workaround should solve this problem.
"""
#add more tickets to make sure we have more than one segment in index
count = 20
for i in range(count):
self.insert_ticket("test %s" % (i))
result = self.whoosh_backend.query(
query.Every(),
facets=["milestone"]
)
self.assertEquals(count, result.hits)
def test_can_query_missing_field_and_type(self):
self.whoosh_backend.add_doc(dict(id="1", type="ticket"))
self.whoosh_backend.add_doc(dict(id="2", type="ticket", milestone="A"))
self.whoosh_backend.add_doc(dict(id="3", type="wiki"))
filter = self.parser.parse_filters(["NOT (milestone:*)", "type:ticket"])
result = self.whoosh_backend.query(
query.Every(),
filter=filter,
)
self.print_result(result)
self.assertEqual(1, result.hits)
self.assertEqual("1", result.docs[0]["id"])
def test_can_query_missing_field(self):
self.whoosh_backend.add_doc(dict(id="1", type="ticket"))
self.whoosh_backend.add_doc(dict(id="2", type="ticket", milestone="A"))
filter = self.parser.parse_filters(["NOT (milestone:*)"])
result = self.whoosh_backend.query(
query.Every(),
filter=filter,
)
self.print_result(result)
self.assertEqual(1, result.hits)
self.assertEqual("1", result.docs[0]["id"])
@unittest.skip("TODO clarify behavior on Whoosh mail list")
def test_can_query_missing_field_and_type_with_no_results(self):
self.whoosh_backend.add_doc(dict(id="1", type="ticket"))
self.whoosh_backend.add_doc(dict(id="3", type="wiki"))
filter = self.parser.parse_filters(["NOT (milestone:*)", "type:ticket"])
result = self.whoosh_backend.query(
query.Every(),
filter=filter,
)
self.print_result(result)
self.assertEqual(0, result.hits)
def test_can_highlight_given_terms(self):
term = 'search_term'
text = "foo foo %s bar bar" % term
self.whoosh_backend.add_doc(dict(id="1", type="ticket", content=text))
self.whoosh_backend.add_doc(dict(id="3", type="wiki", content=text))
search_query = self.parser.parse(term)
result = self.whoosh_backend.query(
search_query,
highlight=True,
highlight_fields=['content', 'summary']
)
self.print_result(result)
self.assertEqual(len(result.highlighting), 2)
for highlight in result.highlighting:
self.assertIn(self._highlighted(term), highlight['content'])
self.assertEquals("", highlight['summary'])
def test_that_highlighting_escapes_html(self):
term = 'search_term'
text = "bla <a href=''>%s bar</a> bla" % term
self.whoosh_backend.add_doc(dict(id="1", type="ticket", content=text))
search_query = self.parser.parse(term)
result = self.whoosh_backend.query(
search_query,
highlight=True,
highlight_fields=['content']
)
self.print_result(result)
self.assertEqual(len(result.highlighting), 1)
highlight = result.highlighting[0]
self.assertEquals(
"bla <a href=''><em>search_term</em> bar</a> bla",
highlight['content'])
def test_highlights_all_text_fields_by_default(self):
term = 'search_term'
text = "foo foo %s bar bar" % term
self.whoosh_backend.add_doc(dict(id="1", type="ticket", content=text))
self.whoosh_backend.add_doc(dict(id="3", type="wiki", content=text))
search_query = self.parser.parse(term)
result = self.whoosh_backend.query(
search_query,
highlight=True,
)
self.print_result(result)
self.assertEqual(len(result.highlighting), 2)
for highlight in result.highlighting:
self.assertIn('content', highlight)
self.assertIn('summary', highlight)
self.assertIn(self._highlighted(term), highlight['content'])
def test_only_highlights_terms_in_fields_that_match_query(self):
term = 'search_term'
self.whoosh_backend.add_doc(dict(id=term, type="wiki", content=term))
self.whoosh_backend.add_doc(dict(id=term, type="ticket", summary=term))
search_query = self.parser.parse('id:%s' % term)
result = self.whoosh_backend.query(
search_query,
highlight=True,
highlight_fields=["id", "content", "summary"]
)
self.print_result(result)
self.assertEqual(len(result.highlighting), 2)
for highlight in result.highlighting:
self.assertIn(self._highlighted(term), highlight['id'])
self.assertNotIn(self._highlighted(term), highlight['summary'])
self.assertNotIn(self._highlighted(term), highlight['content'])
def _highlighted(self, term):
return '<em>%s</em>' % term
class WhooshIndexCreationTests(BaseBloodhoundSearchTest):
    """Tests around lazy index creation and outdated-schema detection."""

    def setUp(self):
        super(WhooshIndexCreationTests, self).setUp()
        self.index_dir = os.path.join(self.env.path, 'whoosh_index')
        if not os.path.exists(self.index_dir):
            os.mkdir(self.index_dir)

    def test_does_not_automatically_create_index(self):
        backend = WhooshBackend(self.env)
        # Merely instantiating the backend must not create an index.
        self.assertIs(backend.index, None)
        self.assertEqual(backend.is_index_outdated(), True)
        backend.recreate_index()
        self.assertEqual(backend.is_index_outdated(), False)
        self.assertIsNot(backend.index, None)

    def test_detects_that_index_needs_upgrade(self):
        # Pre-create an index with a schema that does not match the
        # backend's expected one.
        stale_schema = Schema(content=TEXT())
        index.create_in(self.index_dir, schema=stale_schema)
        backend = WhooshBackend(self.env)
        self.assertEqual(backend.is_index_outdated(), True)
        backend.recreate_index()
        self.assertEqual(backend.is_index_outdated(), False)
class WhooshFunctionalityTestCase(unittest.TestCase):
    """Tests of raw Whoosh behavior (no Bloodhound code in the loop) that
    the backend implementation relies on."""

    def setUp(self):
        self.index_dir = tempfile.mkdtemp('whoosh_index')

    def tearDown(self):
        shutil.rmtree(self.index_dir)

    def test_groupedby_empty_field(self):
        # A document missing a faceted field must be counted under None.
        schema = Schema(
            unique_id=ID(stored=True, unique=True),
            id=ID(stored=True),
            type=ID(stored=True),
            status=KEYWORD(stored=True),
            content=TEXT(stored=True),
        )
        ix = index.create_in(self.index_dir, schema=schema)
        with ix.writer() as w:
            w.add_document(unique_id=u"1", type=u"type1")
            w.add_document(unique_id=u"2", type=u"type2", status=u"New")
        facet_fields = (u"type", u"status")
        groupedby = facet_fields
        with ix.searcher() as s:
            r = s.search(
                query.Every(),
                groupedby=groupedby,
                maptype=sorting.Count,
            )
            facets = self._load_facets(r)
            self.assertEqual(
                {'status': {None: 1, 'New': 1}, 'type': {'type1': 1, 'type2': 1}},
                facets)

    def _load_facets(self, non_paged_results):
        """Collect facet groupings into a plain dict, or None when absent."""
        facet_names = non_paged_results.facet_names()
        if not facet_names:
            return None
        facets_result = dict()
        for name in facet_names:
            facets_result[name] = non_paged_results.groups(name)
        return facets_result

    def test_can_auto_commit(self):
        # pylint: disable=unused-argument
        # Leaving the writer context without an exception must commit.
        schema = Schema(
            unique_id=ID(stored=True, unique=True),
            type=ID(stored=True),
        )
        ix = index.create_in(self.index_dir, schema=schema)
        with ix.writer() as w:
            w.add_document(unique_id=u"1", type=u"type1")
            w.add_document(unique_id=u"2", type=u"type2")
        with ix.searcher() as s:
            results = s.search(query.Every())
            self.assertEqual(2, len(results))

    def test_can_auto_cancel(self):
        # An exception inside the writer context must roll everything back.
        schema = Schema(
            unique_id=ID(stored=True, unique=True),
            type=ID(stored=True),
        )
        ix = index.create_in(self.index_dir, schema=schema)
        try:
            with ix.writer() as w:
                w.add_document(unique_id=u"1", type=u"type1")
                w.add_document(unique_id=u"2", type=u"type2")
                raise Exception("some exception")
        except Exception:
            pass
        with ix.searcher() as s:
            results = s.search(query.Every())
            self.assertEqual(0, len(results))

    def test_handles_stop_words_in_queries(self):
        schema = WhooshBackend.SCHEMA
        ix = index.create_in(self.index_dir, schema=schema)
        with ix.writer() as w:
            w.add_document(content=u"A nice sentence with stop words.")
        with ix.searcher() as s:
            query_text = u"with stop"
            # field_names both ignore stop words
            q = MultifieldParser(['content', 'summary'],
                                 WhooshBackend.SCHEMA).parse(query_text)
            self.assertEqual(unicode(q.simplify(s)),
                             u'((content:with OR summary:with) AND '
                             u'(content:stop OR summary:stop))')
            self.assertEqual(len(s.search(q)), 1)
            # 'content' and 'id' ignores stop words
            q = MultifieldParser(['content', 'id'],
                                 WhooshBackend.SCHEMA).parse(query_text)
            self.assertEqual(unicode(q.simplify(s)),
                             u'((content:with OR id:with) AND '
                             u'(content:stop OR id:stop))')
            self.assertEqual(len(s.search(q)), 1)

    def test_can_filter_to_no_results(self):
        schema = Schema(
            id=ID(stored=True),
            filter=TEXT(stored=True),
        )
        ix = index.create_in(self.index_dir, schema=schema)
        with ix.writer() as w:
            w.add_document(id=u"1", filter=u"f1")
            w.add_document(id=u"2", filter=u"f2")
        with ix.searcher() as s:
            r = s.search(
                query.Every(),
                filter=QueryParser('', schema).parse(u"filter:other")
            )
            self.assertEqual(len(r), 0)
def suite():
    """Build the test suite for this module.

    NOTE(review): WhooshIndexCreationTests is defined above but not added
    here -- confirm whether that omission is intentional.
    """
    all_tests = unittest.TestSuite()
    for case in (WhooshBackendTestCase, WhooshFunctionalityTestCase):
        all_tests.addTest(unittest.makeSuite(case))
    return all_tests
# Allow running the tests in this module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
190459 | <reponame>AartGoossens/goldencheetahlib
import vcr
# Set to True to enable 'once' mock recording (record a cassette on the
# first run); when False, playback-only ('none') is used.
MOCK_RECORDING_ENABLED = False
# NOTE: rebinding `vcr` deliberately shadows the imported vcr module with
# the configured VCR instance that tests import from this module.
vcr = vcr.VCR(
    cassette_library_dir='tests/mock/',
    record_mode='once' if MOCK_RECORDING_ENABLED else 'none',
    match_on=['uri'],
)
| StarcoderdataPython |
3387321 | with open('input') as f:
binarr = []
for i in f.readlines()[0]:
binarr.append(0)
for line in f.readlines():
for i in range(line):
binarr[i] += line[i]
print(binarr)
| StarcoderdataPython |
1705572 | <filename>agora_graphql/gql/__init__.py
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2018 <NAME>.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
from expiringdict import ExpiringDict
from graphql import parse, build_ast_schema, MiddlewareManager, Source, validate, execute
from graphql.execution import ExecutionResult
from agora_graphql.gql.executor import AgoraExecutor
from agora_graphql.gql.middleware import AgoraMiddleware
from agora_graphql.gql.schema import create_gql_schema
__author__ = '<NAME>'
class GraphQLProcessor(object):
    """Builds a GraphQL schema for an Agora gateway and executes queries
    against it, with middleware-level caching of data-gateway results."""

    def __init__(self, gateway, schema_path=None, data_gw_cache=None, **kwargs):
        self.__gateway = gateway
        if schema_path:
            with open(schema_path) as f:
                source = f.read()
        else:
            source = create_gql_schema(gateway)
        # BUG FIX: keep the schema source in both branches; originally it
        # was only stored when generated, so `schema_text` raised
        # AttributeError whenever schema_path was provided.
        self.__schema_source = source
        document = parse(source)
        self.__schema = build_ast_schema(document)
        # Wire our resolver into every abstract type (interfaces/unions).
        abstract_types = filter(lambda x: hasattr(x, 'resolve_type'),
                                dict(self.__schema.get_type_map()).values())
        for at in abstract_types:
            at.resolve_type = self.__resolve_type
        self.__executor = AgoraExecutor(gateway)
        if not data_gw_cache:
            data_gw_cache = {'max_age_seconds': 300, 'max_len': 1000000}
        self.expiring_dict = ExpiringDict(**data_gw_cache)
        middleware = AgoraMiddleware(gateway, data_gw_cache=self.expiring_dict,
                                     **kwargs)
        self.__middleware = MiddlewareManager(middleware)

    def __resolve_type(self, *args, **kwargs):
        # Delegate abstract-type resolution to the AgoraMiddleware instance.
        m = self.middleware.middlewares[0]
        return m.resolve_type(*args, **kwargs)

    @property
    def schema_text(self):
        """The GraphQL schema source (read from file or generated)."""
        return self.__schema_source

    @property
    def middleware(self):
        # NOTE: this property was defined twice in the original class; the
        # redundant duplicate definition has been removed.
        return self.__middleware

    @property
    def executor(self):
        return self.__executor

    @property
    def schema(self):
        return self.__schema

    def query(self, q):
        """Parse, validate and execute the GraphQL query string *q*.

        Always returns an ExecutionResult; errors are captured in the
        result rather than raised.
        """
        try:
            source = Source(q, name='GraphQL request')
            ast = parse(source)
            validation_errors = validate(self.schema, ast)
            if validation_errors:
                return ExecutionResult(
                    errors=validation_errors,
                    invalid=True,
                )
        except Exception as e:
            return ExecutionResult(errors=[e], invalid=True)
        try:
            return execute(self.__schema,
                           ast,
                           root_value=None,
                           variable_values={},
                           operation_name=None,
                           context_value={
                               'query': q,
                               'introspection': 'introspection' in q.lower()
                           },
                           middleware=self.__middleware,
                           executor=self.__executor
                           )
        except Exception as e:
            return ExecutionResult(errors=[e], invalid=True)
| StarcoderdataPython |
4827856 | <gh_stars>0
#!/usr/bin/env python
'''
Example script that generates FEFF input files from a cif file
Remove comment # on write line to actually write files to disk
'''
from __future__ import division
__author__ = "<NAME>"
__credits__= "<NAME>, <NAME>"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0.2"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "April 7, 2013"
import argparse
from pymatgen.io.feffio_set import *
from pymatgen.io.vaspio import *
from pymatgen.io.cifio import CifParser
# Command-line interface: positional cif file, absorbing-atom symbol and
# spectrum type (XANES or EXAFS), all single-valued (nargs=1).
parser = argparse.ArgumentParser(description='''
Example script to generate FEFF input files from a cif file
Author: <NAME>
Version: 1.0
Last updated: August, 2012''')
parser.add_argument('cif_file', metavar='cif_file', type=str, nargs=1, help='cif_file to use')
parser.add_argument('central_atom', metavar='central_atom', type=str, nargs=1, help='symbol of absorbing atom')
parser.add_argument('calc_type', metavar='calc_type', type=str, nargs=1, help='type of calc, currently XANES or EXAFS')
args = parser.parse_args()
# nargs=1 stores single-element lists, hence the [0] indexing below.
cif_file = args.cif_file[0]
central_atom = args.central_atom[0]
calc_type = args.calc_type[0]
# Parse the structure from the cif file; only the first structure is used.
r = CifParser(cif_file)
structure = r.get_structures()[0]
# Build each FEFF input section and print it to stdout.
x = FeffInputSet("MaterialsProject")
source = os.path.basename(cif_file)
comment = 'From cif file'
header = FeffInputSet.get_header(x, structure, source, comment)
print "\n\nHEADER\n"
print header
tags = FeffInputSet.get_feff_tags(x, calc_type)
print "\n\nPARAMETERS\n"
print tags
POT = FeffInputSet.get_feff_pot(x, structure, central_atom)
print "\n\nPOTENTIALS\n"
print POT
ATOMS = FeffInputSet.get_feff_atoms(x, structure, central_atom)
print"\n\nATOMS\n"
print ATOMS
# Uncomment to write the assembled FEFF input files to ./feffinput
# instead of only printing the individual sections.
#x.write_input(structure, calc_type, source, central_atom, comment, "./feffinput")
184517 | from nflows.distributions.base import Distribution
import torch
class ProdDist(Distribution):
    """A product distribution with arbitrary, independent marginals.

    *shape* is the event shape; its first element is the number of
    dimensions. *marginals* must be indexable, holding one 1-D
    distribution per dimension.
    """

    def __init__(self, shape, marginals):
        super().__init__()
        self._shape = torch.Size(shape)
        # Number of dimensions / marginals.
        self.d = self._shape[0]
        self.marginals = marginals

    def _log_prob(self, inputs, context):
        # Note: the context is ignored; the distribution is unconditional.
        if inputs.shape[1:] != self._shape:
            raise ValueError(
                "Expected input of shape {}, got {}".format(
                    self._shape, inputs.shape[1:]
                )
            )
        # By independence, the joint log-density is the sum of the
        # per-dimension marginal log-densities.
        log_prob = 0
        for dim in range(self.d):
            log_prob += self.marginals[dim].log_prob(inputs[:, dim])
        return log_prob

    def _sample(self, num_samples, context):
        if context is None:
            # Draw num_samples from each marginal and stack them into a
            # (num_samples, d) tensor.
            dim_wise_sample = [
                self.marginals[dim].sample([num_samples])
                for dim in range(self.d)
            ]
            return torch.stack(dim_wise_sample, 1)
        else:
            raise NotImplementedError

    def _mean(self, context):
        if context is None:
            # BUG FIX: the original referenced self._log_z, which is never
            # defined on this class (it looks copied from StandardNormal),
            # so this path always raised AttributeError. Zeros preserve the
            # apparent original intent. NOTE(review): the true mean of
            # arbitrary marginals may differ -- confirm.
            return torch.zeros(self._shape)
        else:
            # The value of the context is ignored, only its size is taken
            # into account.
            return context.new_zeros(context.shape[0], *self._shape)
1621445 | <reponame>CatsAreFluffy/generator-regex
'''This is a generator based regex module.'''
class RegexState:
    """An internal state for the regex engine.

    Tracks the subject string, the current match position (`index`),
    finished captures (name -> (start, stop) span) and still-open
    captures (name -> start).
    """

    def __init__(self, string, index=0, captured=None, capturing=None):
        """Create a new internal state. Implicitly opens group 0."""
        self.string = string
        self.index = index
        if capturing is not None:
            self.capturing = capturing
        else:
            self.capturing = {0: index}
        if captured is not None:
            self.captured = captured
        else:
            self.captured = {}

    def __len__(self):
        """Return the number of characters left from the current position."""
        return len(self.string) - self.index

    def __getitem__(self, key):
        """Index into the string relative to the current position."""
        if isinstance(key, slice):
            if key.start is not None:
                start = key.start + self.index
            else:
                start = self.index
            if key.stop is not None:
                stop = key.stop + self.index
            else:
                # BUG FIX: an open-ended slice must run to the end of the
                # string; the original computed the *remaining length*
                # (len - index), which silently truncated the slice.
                stop = len(self.string)
            return self.string[start:stop:key.step]
        else:
            return self.string[self.index + key]

    def __str__(self):
        """Return a human-readable summary of the finished captures."""
        temp = ""
        for i in self.captured:
            temp += str(i) + ":" + self.getcapture(i) + ", "
        return temp[:-2]

    def startcapture(self, capturename):
        """Open a capture group at the current position."""
        self.capturing[capturename] = self.index
        return self

    def endcapture(self, capturename):
        """Close an open capture group, recording its (start, stop) span."""
        self.captured[capturename] = (self.capturing[capturename], self.index)
        del self.capturing[capturename]
        return self

    def getcapture(self, capturename):
        """Get a finished capture as a string."""
        return self.string[slice(*self.captured[capturename])]

    def goto(self, newpos):
        """Jump to an absolute position in the subject string."""
        self.index = newpos
        return self

    def advance(self, offset):
        """Advance the current position by *offset* characters."""
        self.index += offset
        return self

    def copy(self):
        """Return an independent copy (the capture dicts are shallow-copied)."""
        return RegexState(self.string, self.index, self.captured.copy(),
                          self.capturing.copy())

    def debug(self, x=""):
        """Print the internal state (debugging aid) and return self."""
        print(self.string, self.index, self.capturing, self.captured, x)
        return self
def echo(value):
    '''Print *value* and return it unchanged. Debugging aid.'''
    print(value)
    return value
def regex(x):
    '''A wrapper for internal regex generators.

    Tries the compiled matcher from every start offset of the subject and
    closes the implicit group 0 on each match it yields.
    '''
    def regex_gen(subject):
        for start in range(len(subject)):
            for match in x(RegexState(subject, start)):
                yield match.endcapture(0)
    return regex_gen
def charclass(chars):
    '''Defines a character class. Also used for single characters.'''
    def charclass_gen(state):
        '''Yield the advanced state when the next character is in the class.'''
        if len(state) and state[0] in chars:
            yield state.advance(1)
    return charclass_gen
def inv_charclass(x):
    '''Defines an inverted character class. Also used for `.`.'''
    def inv_charclass_gen(string):
        '''Yield the advanced state when the next character is NOT in the class.'''
        if len(string) > 0 and string[0] not in x:
            yield string.advance(1)
    # BUG FIX: the original never returned the inner generator, so
    # inv_charclass(...) evaluated to None and every inverted class
    # (and the `.` metacharacter) crashed with a TypeError when called.
    return inv_charclass_gen
def dot_gen(string):
    '''Matches any single character other than a newline.'''
    for state in inv_charclass("\n")(string):
        yield state
def quantifier(contents,minmatches,maxmatches):
    '''Defines a quantifier. Use maxmatches=-1 for unlimited matches.'''
    def required_block(string):
        '''Subcomponent of a quantifier. Used when a submatch is required.'''
        # Consume one mandatory repetition, then recurse with both counters
        # decremented; copy() protects the caller's state from mutation.
        yield from pair(contents,quantifier(contents,minmatches-1,maxmatches-1))(string.copy())
    def optional_block(string):
        '''Subcomponent of a quantifier. Used when a submatch is optional.'''
        # Greedy: try one more repetition first, then fall back to none.
        yield from optional(pair(contents,quantifier(contents,0,maxmatches-1)))(string.copy())
    def nesting_block(string):
        '''Subcomponent of a quantifier. Used when the quantifier is unbounded.'''
        # Unbounded greedy recursion: keep matching `contents`, then yield
        # the incoming state itself as the zero-repetition fallback.
        yield from pair(contents,nesting_block)(string.copy())
        yield string
    # Dispatch on the remaining counters: required reps first, then the
    # unbounded case, then a bounded-optional tail, and finally nothing.
    if minmatches>0:
        return required_block
    elif maxmatches<0:
        return nesting_block
    elif maxmatches>0:
        return optional_block
    else:
        return nothing_gen
def optional(contents, lazy=False):
    '''Defines an optional section.

    Greedy by default: try the contents first, then yield the untouched
    state as the empty match. With lazy=True the order is reversed.
    '''
    def greedy(string):
        yield from contents(string.copy())
        yield string
    def lazy_first(string):
        yield string
        yield from contents(string.copy())
    return lazy_first if lazy else greedy
def anchor(pos):
    '''Defines an anchor.

    Negative positions count from the end of the subject, so anchor(0) is
    `^` and anchor(-1) is `$`.
    '''
    def anchor_gen(state):
        target = pos if pos >= 0 else pos + len(state.string) + 1
        if state.index == target:
            yield state
    return anchor_gen
def alternation(*contents):
    '''Defines an alternation.'''
    def alternation_gen(state):
        '''Try each alternative in order, each on its own copy of the state.'''
        for branch in contents:
            yield from branch(state.copy())
    return alternation_gen
def capture(name, *contents):
    '''Defines a capturing group.'''
    def capture_gen(string):
        state = string.copy().startcapture(name)
        for match in sequence(*contents)(state):
            yield match.endcapture(name)
            # Reopen the capture so backtracking can close it again later.
            match.startcapture(name)
    return capture_gen
def backref(name):
    '''Defines a backreference.'''
    def backref_gen(state):
        '''Match the literal text previously captured under *name*.'''
        captured_text = state.string[slice(*state.captured[name])]
        ahead = state[0:len(captured_text)]
        if captured_text == ahead:
            yield state.advance(len(captured_text))
    return backref_gen
def sequence(*contents):
    '''Joins any number of components into one matcher.'''
    if len(contents) == 0:
        return nothing_gen
    if len(contents) == 1:
        return contents[0]
    head, rest = contents[0], contents[1:]
    def sequence_gen(state):
        '''Match the head component, then the rest of the sequence.'''
        yield from pair(head, sequence(*rest))(state)
    return sequence_gen
def zerowidth(*contents):
    '''Defines a zero width assertion (lookahead).'''
    def zerowidth_gen(state):
        '''Match *contents*, then rewind every result to the start position.'''
        start = state.index
        for match in sequence(*contents)(state):
            yield match.goto(start)
    return zerowidth_gen
def nothing_gen(string):
    '''Matches the empty string: always succeeds once, consuming no input.'''
    yield string
def pair(content1, content2):
    '''Joins two components.'''
    def pair_gen(state):
        '''Feed every match of the first component into the second.'''
        for first in content1(state.copy()):
            yield from content2(first)
    return pair_gen
def compile_regex(string):
    '''Compile a regular expression string into a matcher generator.'''
    # `inter` is a quoted intermediate form: nested lists of
    # [combinator, arg, ...] that `unquote` later turns into generators.
    inter=[regex,[sequence]]
    ptr=0
    captures=0
    index=[1]
    def getpos():
        '''Return the list currently being appended to (walk `index` into `inter`).'''
        temp=inter
        for i in index:
            temp=temp[i]
        return temp
    while ptr<len(string):
        if string[ptr]=="[":
            temp=[] #chars in charclass
            invert=string[ptr+1]=="^"
            ptr+=1
            if invert:
                # BUG FIX: skip the '^' marker so it is not also collected
                # as a member of the inverted class.
                ptr+=1
            while string[ptr]!="]":
                temp.append(string[ptr])
                ptr+=1
            ptr+=1
            if invert:
                getpos().append([inv_charclass,"".join(temp)])
            else:
                getpos().append([charclass,"".join(temp)])
        elif string[ptr]=="|":
            if getpos()[0]!=alternation:
                # Wrap the current sequence in an alternation in place.
                temp=getpos()
                temp[:]=[alternation,temp[:]]
                # NOTE(review): this index fixup only looks correct for
                # top-level alternations -- confirm for nested groups.
                index.insert(0,1)
                index.pop()
            getpos().append([sequence])
            index.append(len(getpos())-1)
            ptr+=1
        elif string[ptr]=="*":
            # Wrap the previous atom in a 0..inf quantifier.
            if isinstance(getpos()[-1],list):
                getpos()[-1]=[quantifier,getpos()[-1][:],0,-1]
            else:
                getpos()[-1]=[quantifier,getpos()[-1],0,-1]
            ptr+=1
        elif string[ptr]=="+":
            # Wrap the previous atom in a 1..inf quantifier.
            if isinstance(getpos()[-1],list):
                getpos()[-1]=[quantifier,getpos()[-1][:],1,-1]
            else:
                getpos()[-1]=[quantifier,getpos()[-1],1,-1]
            ptr+=1
        elif string[ptr]=="?":
            if isinstance(getpos()[-1],list):
                getpos()[-1]=[optional,getpos()[-1][:]]
            else:
                getpos()[-1]=[optional,getpos()[-1]]
            ptr+=1
            if len(string)>ptr and string[ptr]=="?":
                # `??` marks the optional as lazy.
                # BUG FIX: `getpos` is a function -- the original indexed it
                # directly (`getpos[-1]`), a TypeError on any lazy `??`.
                getpos()[-1].append(True)
                ptr+=1
        elif string[ptr]==".":
            getpos().append([inv_charclass,"\n"])
            ptr+=1
        elif string[ptr]=="^":
            getpos().append([anchor,0])
            ptr+=1
        elif string[ptr]=="$":
            getpos().append([anchor,-1])
            ptr+=1
        elif string[ptr]=="(":
            if string[ptr+1]=="?":
                if string[ptr+2]==":":
                    # Non-capturing group.
                    getpos().append([sequence])
                    index.append(len(getpos())-1)
                    # BUG FIX: advance past all three chars of "(?:"; the
                    # original advanced by 2, leaving ':' to be parsed as a
                    # literal character inside the group.
                    ptr+=3
                elif string[ptr+2]=="=":
                    # Zero-width lookahead.
                    getpos().append([zerowidth])
                    index.append(len(getpos())-1)
                    # BUG FIX: advance past all three chars of "(?=" (see above).
                    ptr+=3
                else:
                    raise ValueError("Invalid group type")
            else:
                # Numbered capturing group.
                captures+=1
                getpos().append([capture,captures])
                index.append(len(getpos())-1)
                ptr+=1
        elif string[ptr]==")":
            index.pop()
            ptr+=1
        elif string[ptr]=="\\":
            ptr+=1
            if string[ptr] in "1234567890":
                # Multi-digit backreference.
                temp=""
                while len(string)>ptr and string[ptr] in "1234567890":
                    temp+=string[ptr]
                    ptr+=1
                getpos().append([backref,int(temp)])
            else:
                # Escaped literal character.
                getpos().append([charclass,string[ptr]])
                ptr+=1
        else:
            # Plain literal character.
            getpos().append([charclass,string[ptr]])
            ptr+=1
    def unquote(x):
        '''Convert list format into generators.'''
        if isinstance(x,list):
            return x[0](*[unquote(i) for i in x[1:]])
        else:
            return x
    def lformat(x):
        '''Convert list format to readable string.'''
        if isinstance(x,list):
            return lformat(x[0])+"("+",".join([lformat(i) for i in x[1:]])+")"
        elif isinstance(x,int):
            return str(x)
        elif isinstance(x,str):
            return repr(x)
        else:
            return str(x).split()[1]
    #print(*inter[1],sep="\n") #print list form
    print(lformat(inter)) #print generator form
    return unquote(inter)
# Demo: print every match state produced by running the compiled pattern
# over the sample subject (compile_regex also prints its compiled form).
for i in compile_regex(r"{[0123456789]+,?[0123456789]*}")("{9,} {,} {9} {,5} {} {1,2}"):
    print(i)
| StarcoderdataPython |
22601 | import pytest
from labelsync.github import Github
from labelsync.helpers import HTTPError
from tests.helpers import fl, FIXTURES_PATH, create_cfg_env, get_labels
# Shared fixtures: a config environment and a Github client pointing at
# the live GitHub API (these tests perform real HTTP calls).
c = create_cfg_env('good.cfg')
github = Github(c, name='github', api_url='https://api.github.com/repos')
# Throwaway label used by the create/delete tests.
label = {
    'name':'blabla',
    'color':'aa11bb',
    'description':'whatever'
}
# GitHub's default 'bug' label, used by the edit test to revert its change.
label_bug = {
    'name':'bug',
    'color':'d73a4a',
    'description':'Something isn\'t working'
}
# Replacement label that the edit test renames 'bug' into.
label_new_bug = {
    'name':'ERROR',
    'color':'ffffff',
    'description':'ERROR'
}
def test_create_label():
    """Creating a new label adds exactly one entry to the repo's labels."""
    before = get_labels('beskyfil', 'testing_repo')
    github.create_label('beskyfil', 'testing_repo', label)
    after = get_labels('beskyfil', 'testing_repo')
    assert len(after) == len(before) + 1
    assert 'blabla' not in before
    assert 'blabla' in after
def test_delete_label():
    """Deleting the label created above removes exactly one entry."""
    before = get_labels('beskyfil', 'testing_repo')
    github.delete_label('beskyfil', 'testing_repo', label['name'])
    after = get_labels('beskyfil', 'testing_repo')
    assert len(after) == len(before) - 1
    assert 'blabla' in before
    assert 'blabla' not in after
def test_edit_label():
    """Renaming 'bug' to 'ERROR' keeps the count and swaps the names."""
    before = get_labels('beskyfil', 'testing_repo')
    github.edit_label('beskyfil', 'testing_repo', label_new_bug, 'bug')
    after = get_labels('beskyfil', 'testing_repo')
    assert len(before) == len(after)
    assert 'bug' in before
    assert 'bug' not in after
    assert 'ERROR' in after
    assert 'ERROR' not in before
    # Revert the rename so other tests see the original label set.
    github.edit_label('beskyfil', 'testing_repo', label_bug, 'ERROR')
| StarcoderdataPython |
import argparse
import textwrap

# Command-line interface for the PRAW crawler. Every option has a default so
# the script can run with no arguments.
parser = argparse.ArgumentParser(
    description=textwrap.dedent(
        """\
        A python script to fetch submissions and comments using PRAW API
        """
    ),
    usage='Use "python3 %(prog)s -h" for more information',
    formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
    "-sc",
    "--submissions_count",
    type=int,
    default=10,
    help="The number of submissions to crawl in the subreddits",
)
parser.add_argument(
    "-st",
    "--submissions_type",
    type=str,
    default="hot",
    help="The submissions type to crawl in the subreddits",
)
parser.add_argument(
    "-tf",
    "--time_filter",
    type=str,
    default="day",
    # Bug fix: the help text was a copy-paste of --submissions_type's; this
    # option is the time filter, not the submission type.
    help="The time filter to apply when crawling the subreddits",
)
parser.add_argument(
    "-cc",
    "--comments_count",
    # Kept as str so the literal "None" (no limit) can be passed; converted
    # to int/None below after parsing.
    type=str,
    default="32",
    help="The number of MoreComments to crawl in the comments section",
)
parser.add_argument(
    "-op",
    "--output_path",
    type=str,
    default="./output/",
    help="Output path for the processed files",
)
parser.add_argument(
    "-ip",
    "--input_path",
    type=str,
    default="./input/",
    help="Input path for the subreddits_to_crawl file",
)
parser.add_argument(
    "-ifn",
    "--input_file_name",
    type=str,
    default="subreddits_to_crawl.csv",
    help="File containing csv of subreddits to crawl",
)
parser.add_argument(
    "-svt",
    "--save_type",
    type=str,
    default="csv",
    help=textwrap.dedent(
        """\
        Save mode, can be csv, db, dbwi. Defaults to csv.
        csv - csv file
        db - db mode with no initialization(tables are expected to exist)
        dbwi - db mode with initialization, tables are created as per the statements in `db_tables["init"] arg variable`"""
    ),
)
# --comments / --no-comments toggle a single boolean flag; the default is on.
feature_parser = parser.add_mutually_exclusive_group(required=False)
feature_parser.add_argument(
    "-c",
    "--comments",
    dest="comments",
    action="store_true",
    help="Flag to switch on the crawling of comments",
)
feature_parser.add_argument(
    "-nc",
    "--no-comments",
    dest="comments",
    action="store_false",
    help="Flag to switch off the crawling of comments",
)
parser.set_defaults(comments=True)
args = parser.parse_args()
# Normalize --comments_count: the literal string "None" means "no limit";
# anything else must parse as an integer.
if args.comments_count == "None":
    args.comments_count = None
else:
    try:
        args.comments_count = int(args.comments_count)
    except ValueError:
        print("Please pass a number or None for the --comments_count (-cc) option")
        raise
| StarcoderdataPython |
1730648 | """
2016 Day 5
https://adventofcode.com/2016/day/5
"""
from functools import lru_cache
from hashlib import md5
import aocd # type: ignore
@lru_cache
def md5hash(door: str, index: int) -> str:
    """
    Calculate the md5 hash for the given Door ID and integer index.

    Results are memoised so repeated lookups (e.g. part 1 then part 2 on the
    same door) do not recompute digests.
    """
    payload = f"{door}{index}".encode("utf-8")
    return md5(payload).hexdigest()
def create_password(door: str) -> str:
    """
    Create a password for the given door - i.e. the sixth character of each of
    the first eight hashes beginning with five 0s.
    """
    chars: list[str] = []
    index = 0
    while len(chars) < 8:
        digest = md5hash(door, index)
        if digest.startswith("00000"):
            chars.append(digest[5])
        index += 1
    return "".join(chars)
def create_password2(door: str) -> str:
    """
    Create the password using the part 2 algorithm.

    For each hash with five leading zeros, the sixth hex digit names the
    position (0-7, first hit per position wins) and the seventh digit is the
    character placed there.

    Fix: the original loop condition was ``any(...) > 0`` — comparing a bool
    against an int; it now tests directly for unfilled slots.
    """
    # "z" is a safe sentinel: it can never appear as an MD5 hex digit.
    password: list[str] = ["z"] * 8
    index = 0
    while "z" in password:
        hsh = md5hash(door, index)
        if hsh[:5] == "00000":
            # Parse the position as a hex digit: 'a'-'f' map to 10-15 and are
            # rejected by the <= 7 check, exactly like the old ValueError path.
            position = int(hsh[5], 16)
            if position <= 7 and password[position] == "z":
                password[position] = hsh[6]
        index += 1
    return "".join(password)
def main() -> None:
    """
    Calculate and output the solutions based on the real puzzle input.
    """
    # aocd fetches the puzzle input over the network (cached session token).
    data = aocd.get_data(year=2016, day=5)
    print(f"Part 1: {create_password(data)}")
    print(f"Part 2: {create_password2(data)}")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
4835099 | import torch
from torch import nn
from mish_activation import Mish
class Head(nn.Module):
    """Classification head: concat-pool, flatten, then two BN/dropout/linear
    stages producing `n` outputs from `nc`-channel feature maps."""

    def __init__(self, nc, n, ps=0.5):
        super().__init__()
        modules = [AdaptiveConcatPool2d(), Mish(), Flatten()]
        modules += bn_drop_lin(nc * 2, 512, True, ps, Mish())
        modules += bn_drop_lin(512, n, True, ps)
        self.fc = nn.Sequential(*modules)
        self._init_weight()

    def _init_weight(self):
        # Kaiming init for convolutions; identity-like init for batch norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                torch.nn.init.kaiming_normal_(module.weight)
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1.0)
                module.bias.data.zero_()

    def forward(self, x):
        return self.fc(x)
class DensenetOneChannel(nn.Module):
    """DenseNet backbone adapted to single-channel input with three heads.

    The pretrained 3-channel stem conv is converted to one channel by summing
    its filters over the input-channel dimension; the dense blocks are then
    regrouped into ResNet-style ``layer0..layer4`` stages.
    """

    def __init__(self, arch, n, pretrained=True, ps=0.5):
        super().__init__()
        m = arch(True) if pretrained else arch()
        # change the first conv to accept 1 chanel input
        conv = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        # Sum pretrained RGB filters into one channel so pretrained features
        # carry over to grayscale input.
        w = (m.features.conv0.weight.sum(1)).unsqueeze(1)
        conv.weight = nn.Parameter(w)
        self.layer0 = nn.Sequential(conv, m.features.norm0, nn.ReLU(inplace=True))
        self.layer1 = nn.Sequential(
            nn.MaxPool2d(
                kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False
            ),
            m.features.denseblock1,
        )
        self.layer2 = nn.Sequential(m.features.transition1, m.features.denseblock2)
        self.layer3 = nn.Sequential(m.features.transition2, m.features.denseblock3)
        self.layer4 = nn.Sequential(
            m.features.transition3, m.features.denseblock4, m.features.norm5
        )
        # Channel count of the final norm layer (norm5) feeds the heads.
        nc = self.layer4[-1].weight.shape[0]
        # Three independent heads; `n` holds their three output sizes.
        self.head1 = Head(nc, n[0])
        self.head2 = Head(nc, n[1])
        self.head3 = Head(nc, n[2])

    def forward(self, x):
        # Shared backbone, then one prediction per head.
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x1 = self.head1(x)
        x2 = self.head2(x)
        x3 = self.head3(x)
        return x1, x2, x3
class Flatten(nn.Module):
    "Flatten `x` to a single dimension, often used at the end of a model. `full` for rank-1 tensor"

    def __init__(self, full: bool = False):
        # Bug fix: nn.Module.__init__ was never called, so invoking the module
        # (e.g. inside nn.Sequential) raised AttributeError on the missing
        # hook/buffer attributes.
        super().__init__()
        self.full = full

    def forward(self, x):
        # full=True collapses everything to a rank-1 tensor; otherwise the
        # batch dimension is preserved.
        return x.view(-1) if self.full else x.view(x.size(0), -1)
class AdaptiveConcatPool2d(nn.Module):
"Layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`."
def __init__(self, sz: int = None):
"Output will be 2*sz or 2 if sz is None"
self.output_size = sz or 1
self.ap = nn.AdaptiveAvgPool2d(self.output_size)
self.mp = nn.AdaptiveMaxPool2d(self.output_size)
def forward(self, x):
return torch.cat([self.mp(x), self.ap(x)], 1)
def bn_drop_lin(
    n_in: int,
    n_out: int,
    bn: bool = True,
    p: float = 0.0,
    actn: nn.Module = None,
):
    "Sequence of batchnorm (if `bn`), dropout (with `p`) and linear (`n_in`,`n_out`) layers followed by `actn`."
    layers = []
    if bn:
        layers.append(nn.BatchNorm1d(n_in))
    if p != 0:
        layers.append(nn.Dropout(p))
    layers.append(nn.Linear(n_in, n_out))
    if actn is not None:
        layers.append(actn)
    return layers
| StarcoderdataPython |
3296233 | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by <NAME> (<EMAIL>)
# -----------------------------------------------------
"""Custum training dataset."""
import copy
import os
import cv2
import pickle as pk
from abc import abstractmethod, abstractproperty
import torch.utils.data as data
from pycocotools.coco import COCO
from alphapose.utils.presets import SimpleTransform
class CustomDataset(data.Dataset):
    """Custom dataset.

    Annotation file must be in `coco` format.

    Parameters
    ----------
    train: bool, default is True
        If true, will set as training mode.
    dpg: bool, default is False
        If true, will activate `dpg` for data augmentation.
    skip_empty: bool, default is False
        Whether skip entire image if no valid label is found.
    cfg: dict, dataset configuration.
    """

    # Placeholder; subclasses provide the real class-name tuple (see the
    # abstract property of the same name below).
    CLASSES = None

    def __init__(self,
                 train=True,
                 dpg=False,
                 skip_empty=True,
                 lazy_import=False,
                 **cfg):
        self._cfg = cfg
        self._preset_cfg = cfg['PRESET']
        self._root = cfg['ROOT']
        self._img_prefix = cfg['IMG_PREFIX']
        self._ann_file = os.path.join(self._root, cfg['ANN'])
        self._lazy_import = lazy_import
        self._skip_empty = skip_empty
        self._train = train
        self._dpg = dpg
        # Augmentation config is optional; fall back to "disabled" values.
        if 'AUG' in cfg.keys():
            self._scale_factor = cfg['AUG']['SCALE_FACTOR']
            self._rot = cfg['AUG']['ROT_FACTOR']
            self.num_joints_half_body = cfg['AUG']['NUM_JOINTS_HALF_BODY']
            self.prob_half_body = cfg['AUG']['PROB_HALF_BODY']
        else:
            self._scale_factor = 0
            self._rot = 0
            self.num_joints_half_body = -1
            self.prob_half_body = -1
        self._input_size = self._preset_cfg['IMAGE_SIZE']
        self._output_size = self._preset_cfg['HEATMAP_SIZE']
        self._sigma = self._preset_cfg['SIGMA']
        self._check_centers = False
        self.num_class = len(self.CLASSES)
        self._loss_type = self._preset_cfg.get('LOSS_TYPE', 'MSELoss')
        # Keypoint index split used by half-body augmentation — assumes a
        # 17-joint COCO-style layout; TODO confirm for subclasses.
        self.upper_body_ids = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
        self.lower_body_ids = (11, 12, 13, 14, 15, 16)
        # Only the 'simple' transform preset is implemented.
        if self._preset_cfg['TYPE'] == 'simple':
            self.transformation = SimpleTransform(
                self, scale_factor=self._scale_factor,
                input_size=self._input_size,
                output_size=self._output_size,
                rot=self._rot, sigma=self._sigma,
                train=self._train, add_dpg=self._dpg,
                loss_type=self._loss_type)
        else:
            raise NotImplementedError
        self._items, self._labels = self._lazy_load_json()

    def __getitem__(self, idx):
        """Return one sample: (img, label, label_mask, img_id, bbox)."""
        # get image id
        img_path = self._items[idx]
        img_id = int(os.path.splitext(os.path.basename(img_path))[0])
        # load ground truth, including bbox, keypoints, image size
        label = copy.deepcopy(self._labels[idx])
        img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB) #scipy.misc.imread(img_path, mode='RGB') is depreciated
        # transform ground truth into training label and apply data augmentation
        img, label, label_mask, bbox = self.transformation(img, label)
        return img, label, label_mask, img_id, bbox

    def __len__(self):
        return len(self._items)

    def _lazy_load_ann_file(self):
        """Load the COCO annotation object, using a pickle cache when allowed."""
        if os.path.exists(self._ann_file + '.pkl') and self._lazy_import:
            print('Lazy load json...')
            with open(self._ann_file + '.pkl', 'rb') as fid:
                return pk.load(fid)
        else:
            _database = COCO(self._ann_file)
            # NOTE(review): os.access on a missing file returns False, so the
            # cache is only (re)written when the .pkl already exists — confirm
            # this is intentional.
            if os.access(self._ann_file + '.pkl', os.W_OK):
                with open(self._ann_file + '.pkl', 'wb') as fid:
                    pk.dump(_database, fid, pk.HIGHEST_PROTOCOL)
            return _database

    def _lazy_load_json(self):
        """Load (items, labels), using a pickle cache when allowed."""
        if os.path.exists(self._ann_file + '_annot_keypoint.pkl') and self._lazy_import:
            print('Lazy load annot...')
            with open(self._ann_file + '_annot_keypoint.pkl', 'rb') as fid:
                items, labels = pk.load(fid)
        else:
            items, labels = self._load_jsons()
            # Same caching caveat as _lazy_load_ann_file above.
            if os.access(self._ann_file + '_annot_keypoint.pkl', os.W_OK):
                with open(self._ann_file + '_annot_keypoint.pkl', 'wb') as fid:
                    pk.dump((items, labels), fid, pk.HIGHEST_PROTOCOL)
        return items, labels

    @abstractmethod
    def _load_jsons(self):
        # Subclasses parse the annotation file into (items, labels).
        pass

    @abstractproperty
    def CLASSES(self):
        # NOTE(review): shadows the CLASSES = None class attribute above;
        # subclasses are expected to supply the real value.
        return None

    @abstractproperty
    def num_joints(self):
        return None

    @abstractproperty
    def joint_pairs(self):
        """Joint pairs which defines the pairs of joint to be swapped
        when the image is flipped horizontally."""
        return None
| StarcoderdataPython |
3317595 | import bs4 as bs
import os
import os.path
import requests
import base64
import json
from datetime import datetime, timezone
from requests.packages import urllib3
# rrdtool dump test.rrd > test.xml
# Filesystem layout used throughout this script.
cfg_name = 'config.json'  # config file name, resolved relative to db_path
db_path = '/home/lcladmin/cmstats/data/'  # RRD databases and config live here
web_path = '/var/www/html/cmstats/'  # web root for the generated HTML/PNG output
def main():
    """Read the connection config and dispatch to the HTTP or HTTPS reader.

    Raises:
        ValueError: if ``conn_type`` in the config is neither 'http' nor
            'https' (was a bare ``Exception`` before).
    """
    with open(db_path + cfg_name, encoding='utf-8') as f:
        # json.load reads straight from the file object — no intermediate str.
        config = json.load(f)
    conn_type = config['conn_type']
    if conn_type == 'http':
        read_http()
    elif conn_type == 'https':
        read_https(config['username'], config['password'])
    else:
        raise ValueError('invalid conn_type: ' + repr(conn_type))
def read_http():
    """Fetch the modem's status and info pages over plain HTTP and parse them."""
    status_html = requests.get('http://192.168.100.1/cmconnectionstatus.html').text
    info_html = requests.get('http://192.168.100.1/cmswinfo.html').text
    parse_all(status_html, info_html)
def read_https(username, password):
    """Log in to the modem over HTTPS, fetch the status/info pages, parse them.

    The modem uses a self-signed certificate, so TLS verification is disabled
    and the corresponding urllib3 warning is suppressed.
    """
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    s = requests.Session()
    r0 = s.get('https://192.168.100.1', verify=False)
    # Base64 "user:pass" doubles as the Basic auth header and the modem's
    # login token in the URL.
    message = username + ':' + password
    message_bytes = message.encode('ascii')
    base64_bytes = base64.b64encode(message_bytes)
    cm_cred = base64_bytes.decode('ascii')
    cm_head = {'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8', 'Authorization': 'Basic ' + cm_cred}
    r1 = s.get('https://192.168.100.1/cmconnectionstatus.html?login_' + cm_cred, headers=cm_head, verify=False)
    cm_ct = r1.text
    r2a = s.get('https://192.168.100.1/cmconnectionstatus.html?ct_' + cm_ct, verify=False)
    r2b = s.get('https://192.168.100.1/cmswinfo.html?ct_' + cm_ct, verify=False)
    try:
        s.get('https://192.168.100.1/logout.html', verify=False)
    except Exception:
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt. Logout is best-effort; parsing proceeds anyway.
        pass
    parse_all(r2a.text, r2b.text)
def parse_all(cm_status_page, cm_info_page):
    """Parse both modem pages and push the results into the RRD databases."""
    channel_data = parse_cm_status(cm_status_page)
    info_data = parse_cm_info(cm_info_page)
    update_rrd(channel_data, info_data)
def parse_cm_status(source):
    """Parse the modem's connection-status HTML page.

    Returns ``{'downstream': [...], 'upstream': [...]}`` where each entry maps
    metric names to string values with their unit suffixes stripped (so they
    can be fed directly to rrdtool).
    """
    soup = bs.BeautifulSoup(source, 'lxml')
    tables = soup.find_all('table', attrs={'class':'simpleTable'})
    # tables[1] holds the downstream channel rows, tables[2] the upstream rows.
    ds_table = tables[1]
    ds_channels = ds_table.find_all('tr', attrs={'align':'left'})
    ds = []
    for ds_channel in ds_channels:
        cols = ds_channel.find_all('td')
        channel_id = cols[0].text.strip()
        lock_status = cols[1].text.strip()
        modulation = cols[2].text.strip()
        raw_frequency = cols[3].text.strip()
        raw_power = cols[4].text.strip()
        raw_snr = cols[5].text.strip()
        corrected = cols[6].text.strip()
        uncorrected = cols[7].text.strip()
        # Strip the unit suffixes from the raw cell text.
        frequency = raw_frequency.replace(' Hz', '')
        power = raw_power.replace(' dBmV', '')
        snr = raw_snr.replace(' dB', '')
        ds_channel_values = {
            'frequency': frequency,
            'power': power,
            'snr': snr,
            'corrected': corrected,
            'uncorrected': uncorrected
        }
        ds.append(ds_channel_values)
    us_table = tables[2]
    us_channels = us_table.find_all('tr', attrs={'align':'left'})
    us = []
    for us_channel in us_channels:
        cols = us_channel.find_all('td')
        channel = cols[0].text.strip()
        channel_id = cols[1].text.strip()
        lock_status = cols[2].text.strip()
        modulation = cols[3].text.strip()
        raw_frequency = cols[4].text.strip()
        raw_width = cols[5].text.strip()
        raw_power = cols[6].text.strip()
        frequency = raw_frequency.replace(' Hz', '')
        width = raw_width.replace(' Hz', '')
        power = raw_power.replace(' dBmV', '')
        us_channel_values = {
            'frequency': frequency,
            'width': width,
            'power': power
        }
        us.append(us_channel_values)
    ret = {
        'downstream': ds,
        'upstream': us
    }
    return ret
def parse_cm_info(source):
    """Parse the modem's software-info HTML page.

    Returns a dict with the model number, hardware/software versions, HFC MAC
    address, serial number and uptime — all as stripped strings. Row indices
    below follow the fixed layout of the modem's info/status tables.
    """
    soup = bs.BeautifulSoup(source, 'lxml')
    # model number
    header_elements = soup.find_all('span', attrs={'id':'thisModelNumberIs'})
    header_element = header_elements[0]
    model_number = header_element.text.strip()
    # information table
    tables = soup.find_all('table', attrs={'class':'simpleTable'})
    info_table = tables[0]
    info_elements = info_table.find_all('tr')
    # hardware version
    hw_ver_elements = info_elements[2]
    hw_ver_cols = hw_ver_elements.find_all('td')
    hw_ver = hw_ver_cols[1].text.strip()
    # software version
    sw_ver_elements = info_elements[3]
    sw_ver_cols = sw_ver_elements.find_all('td')
    sw_ver = sw_ver_cols[1].text.strip()
    # hfc mac
    hfc_mac_elements = info_elements[4]
    hfc_mac_cols = hfc_mac_elements.find_all('td')
    hfc_mac = hfc_mac_cols[1].text.strip()
    # serial number
    ser_num_elements = info_elements[5]
    ser_num_cols = ser_num_elements.find_all('td')
    ser_num = ser_num_cols[1].text.strip()
    # status table
    status_table = tables[1]
    status_elements = status_table.find_all('tr')
    # uptime
    uptime_elements = status_elements[1]
    uptime_cols = uptime_elements.find_all('td')
    uptime = uptime_cols[1].text.strip()
    ret = {
        'model_number': model_number,
        'hw_ver': hw_ver,
        'sw_ver': sw_ver,
        'hfc_mac': hfc_mac,
        'ser_num': ser_num,
        'uptime': uptime
    }
    return ret
def get_frequency_value(elem):
    """Sort key: a channel's frequency as an integer number of Hz."""
    return int(elem['frequency'])
def update_rrd(channels, information):
    """Record channel samples into per-channel RRD databases and regenerate
    the static HTML report plus all rrdtool graphs.

    Side effects: shells out to rrdtool (create/update/graph), writes one
    HTML page per channel and an index page under ``web_path``.
    NOTE(review): channel values are interpolated into shell commands
    unescaped — acceptable here only because they come from the modem's own
    numeric fields.
    """
    # sort channels by frequency
    channels['downstream'] = sorted(channels['downstream'], key=get_frequency_value)
    channels['upstream'] = sorted(channels['upstream'], key=get_frequency_value)
    db_ext = '.rrd'
    img_ext = '.png'
    current_time = datetime.now(timezone.utc).isoformat()
    # **** DOWNSTREAM ****
    ds_path = db_path + 'downstream/'
    graph_path = web_path
    # Index page header with the static modem information table.
    index_contents = str(
        '<html><head><title>' +
        'Cable Modem Statistics (' +
        'Model: ' + information['model_number'] + ', ' +
        'MAC: ' + information['hfc_mac'] + ', ' +
        'Serial: ' + information['ser_num'] +
        ')</title></head><body>' +
        '<h2>Cable Modem Statistics</h2>' +
        '<h3>Last Update</h3>' +
        '<p>' + current_time + '</p>' +
        '<h3>Modem Information</h3>' +
        '<table border="1">' +
        '<tr>' +
        '<th align="left">Model Number</th>' +
        '<td>' + information['model_number'] + '</td>' +
        '</tr>' +
        '<tr>' +
        '<th align="left">Hardware Version</th>' +
        '<td>' + information['hw_ver'] + '</td>' +
        '</tr>' +
        '<tr>' +
        '<th align="left">Software Version</th>' +
        '<td>' + information['sw_ver'] + '</td>' +
        '</tr>' +
        '<tr>' +
        '<th align="left">HFC MAC Address</th>' +
        '<td>' + information['hfc_mac'] + '</td>' +
        '</tr>' +
        '<tr>' +
        '<th align="left">Serial Number</th>' +
        '<td>' + information['ser_num'] + '</td>' +
        '</tr>' +
        '<tr>' +
        '<th align="left">Uptime</th>' +
        '<td>' + information['uptime'] + '</td>' +
        '</tr>' +
        '</table>'
    )
    index_page_ds_summary_contents = str(
        '<h3>Downstream Channels Summary</h3>' +
        '<table border="1">' +
        '<tr>' +
        '<th>Frequency (Hz)</th>' +
        '<th>Power (dBm)</th>' +
        '<th>SNR (dB)</th>' +
        '<th>Corrected (Symbols)</th>' +
        '<th>Uncorrected (Symbols)</th>' +
        '</tr>'
    )
    # All-channel graph commands are accumulated per metric while looping and
    # executed once after the loop.
    # power
    ds_power_all_path = graph_path + 'downstream_all_power' + img_ext
    ds_power_all_cmd = str(
        'rrdtool graph ' + ds_power_all_path + ' -a PNG ' +
        '--width 800 --height 400 --title "Power" ' +
        '--vertical-label "dBm" --disable-rrdtool-tag '
    )
    # snr
    ds_snr_all_path = graph_path + 'downstream_all_snr' + img_ext
    ds_snr_all_cmd = str(
        'rrdtool graph ' + ds_snr_all_path + ' -a PNG ' +
        '--width 800 --height 400 --title "SNR" ' +
        '--vertical-label "dB" --disable-rrdtool-tag '
    )
    # corrected
    ds_corrected_all_path = graph_path + 'downstream_all_corrected' + img_ext
    ds_corrected_all_cmd = str(
        'rrdtool graph ' + ds_corrected_all_path + ' -a PNG ' +
        '--width 800 --height 400 --title "Corrected" ' +
        '--vertical-label "Symbols" --upper-limit 1 --disable-rrdtool-tag '
    )
    # uncorrected
    ds_uncorrected_all_path = graph_path + 'downstream_all_uncorrected' + img_ext
    ds_uncorrected_all_cmd = str(
        'rrdtool graph ' + ds_uncorrected_all_path + ' -a PNG ' +
        '--width 800 --height 400 --title "Uncorrected" ' +
        '--vertical-label "Symbols" --upper-limit 1 --disable-rrdtool-tag '
    )
    for ds_channel in channels['downstream']:
        frequency = ds_channel['frequency']
        power = ds_channel['power']
        snr = ds_channel['snr']
        corrected = ds_channel['corrected']
        uncorrected = ds_channel['uncorrected']
        ds_ch_path = ds_path + frequency + db_ext
        # Create the per-channel RRD on first sight (5-minute step, 1 day of
        # 5-minute averages).
        if (not os.path.exists(ds_ch_path)):
            os.system(
                'rrdtool create ' + ds_ch_path + ' ' +
                '--start N --step 300 ' +
                'DS:power:GAUGE:600:U:U ' +
                'DS:snr:GAUGE:600:U:U ' +
                'DS:corrected:DERIVE:600:0:U ' +
                'DS:uncorrected:DERIVE:600:0:U ' +
                'RRA:AVERAGE:0.5:1:1440'
            )
        os.system(
            'rrdtool update ' + ds_ch_path + ' ' +
            'N:' + power + ':' + snr + ':' + corrected + ':' + uncorrected
        )
        # power
        power_graph_path = graph_path + 'downstream_' + frequency + '_power' + img_ext
        ds_power_ch_cmd = str(
            'rrdtool graph ' + power_graph_path + ' -a PNG --title="' + frequency + ' Hz" ' +
            '--vertical-label "dBm" --disable-rrdtool-tag ' +
            'DEF:power=' + ds_ch_path + ':power:AVERAGE ' +
            'LINE1:power#ff0000:Power'
        )
        os.system(ds_power_ch_cmd)
        ds_power_all_cmd = ds_power_all_cmd + str(
            'DEF:' + frequency + '=' + ds_ch_path + ':power:AVERAGE ' +
            'LINE1:' + frequency + '#ff0000:' + frequency + 'Hz '
        )
        # snr
        snr_graph_path = graph_path + 'downstream_' + frequency + '_snr' + img_ext
        ds_snr_ch_cmd = str(
            'rrdtool graph ' + snr_graph_path + ' -a PNG --title="' + frequency + ' Hz" ' +
            '--vertical-label "dB" --disable-rrdtool-tag ' +
            'DEF:snr=' + ds_ch_path + ':snr:AVERAGE ' +
            'LINE1:snr#ff0000:SNR'
        )
        os.system(ds_snr_ch_cmd)
        ds_snr_all_cmd = ds_snr_all_cmd + str(
            'DEF:' + frequency + '=' + ds_ch_path + ':snr:AVERAGE ' +
            'LINE1:' + frequency + '#ff0000:' + frequency + 'Hz '
        )
        # corrected
        corrected_graph_path = graph_path + 'downstream_' + frequency + '_corrected' + img_ext
        ds_corrected_ch_cmd = str(
            'rrdtool graph ' + corrected_graph_path + ' -a PNG --title="' + frequency + ' Hz" ' +
            '--vertical-label "Symbols" --upper-limit 1 --disable-rrdtool-tag ' +
            'DEF:corrected=' + ds_ch_path + ':corrected:AVERAGE ' +
            'LINE1:corrected#ff0000:Corrected'
        )
        os.system(ds_corrected_ch_cmd)
        ds_corrected_all_cmd = ds_corrected_all_cmd + str(
            'DEF:' + frequency + '=' + ds_ch_path + ':corrected:AVERAGE ' +
            'LINE1:' + frequency + '#ff0000:' + frequency + 'Hz '
        )
        # uncorrected
        uncorrected_graph_path = graph_path + 'downstream_' + frequency + '_uncorrected' + img_ext
        ds_uncorrected_ch_cmd = str(
            'rrdtool graph ' + uncorrected_graph_path + ' -a PNG --title="' + frequency + ' Hz" ' +
            '--vertical-label "Symbols" --upper-limit 1 --disable-rrdtool-tag ' +
            'DEF:uncorrected=' + ds_ch_path + ':uncorrected:AVERAGE ' +
            'LINE1:uncorrected#ff0000:Uncorrected'
        )
        os.system(ds_uncorrected_ch_cmd)
        ds_uncorrected_all_cmd = ds_uncorrected_all_cmd + str(
            'DEF:' + frequency + '=' + ds_ch_path + ':uncorrected:AVERAGE ' +
            'LINE1:' + frequency + '#ff0000:' + frequency + 'Hz '
        )
        # power — green cell when within the acceptable window, red otherwise.
        lower_power_limit = -15
        upper_power_limit = 15
        if ((float(power) > lower_power_limit) and (float(power) < upper_power_limit)):
            power_style = ' style="background-color:#00FF00"'
        else:
            power_style = ' style="background-color:#FF0000"'
        # snr — the acceptable floor depends on the power level.
        if ((float(power) > -6) and (float(power) < 15)):
            lower_snr_limit = 30
        else:
            lower_snr_limit = 33
        if ((float(snr)) > lower_snr_limit):
            snr_style = ' style="background-color:#00FF00"'
        else:
            snr_style = ' style="background-color:#FF0000"'
        index_page_ds_summary_contents = index_page_ds_summary_contents + str(
            '<tr>' +
            '<td><a href="downstream_' + frequency + '.html">' + frequency + '</a></td>' +
            '<td' + power_style + '>' + power + '</td>' +
            '<td' + snr_style + '>' + snr + '</td>' +
            '<td>' + corrected + '</td>' +
            '<td>' + uncorrected + '</td>' +
            '</tr>'
        )
        # Per-channel detail page.
        ch_page_contents = str(
            '<html><head><title>Downstream Channel Details (' + frequency + ' Hz)</title></head><body>' +
            '<h2>Downstream Channel Details (' + frequency + ' Hz)</h2>' +
            '<h3>Last Update</h3>' +
            '<p>' + current_time + '</p>' +
            '<h3>Downstream Channel Summary</h3>' +
            '<table border="1">' +
            '<tr>' +
            '<th>Frequency (Hz)</th>' +
            '<th>Lower Power Limit (dBm)</th>' +
            '<th>Actual Power (dBm)</th>' +
            '<th>Upper Power Limit (dBm)</th>' +
            '<th>Lower SNR Limit (dB)</th>' +
            '<th>Actual SNR (dB)</th>' +
            '<th>Corrected (Symbols)</th>' +
            '<th>Uncorrected (Symbols)</th>' +
            '</tr>' +
            '<tr>' +
            '<td>' + frequency + '</td>' +
            '<td>' + str(lower_power_limit) + '</td>' +
            '<td' + power_style + '>' + power + '</td>' +
            '<td>' + str(upper_power_limit) + '</td>' +
            '<td>' + str(lower_snr_limit) + '</td>' +
            '<td' + snr_style + '>' + snr + '</td>' +
            '<td>' + corrected + '</td>' +
            '<td>' + uncorrected + '</td>' +
            '</tr>' +
            '</table>' +
            '<h3>Downstream Channel Graphs</h3>' +
            '<img src="downstream_' + frequency + '_power.png"/><br/><br/>' +
            '<img src="downstream_' + frequency + '_snr.png"/><br/><br/>' +
            '<img src="downstream_' + frequency + '_corrected.png"/><br/><br/>' +
            '<img src="downstream_' + frequency + '_uncorrected.png"/>' +
            '</body></html>'
        )
        with open(web_path + 'downstream_' + frequency + '.html', 'w') as f:
            f.write(ch_page_contents)
    # Render the accumulated all-channel graphs.
    # power
    os.system(ds_power_all_cmd)
    # snr
    os.system(ds_snr_all_cmd)
    # corrected
    os.system(ds_corrected_all_cmd)
    # uncorrected
    os.system(ds_uncorrected_all_cmd)
    index_page_ds_summary_contents = index_page_ds_summary_contents + str(
        '</table>'
    )
    # **** UPSTREAM ****
    us_path = db_path + 'upstream/'
    index_page_us_summary_contents = str(
        '<h3>Upstream Channels Summary</h3>' +
        '<table border="1"><tr><th>Frequency (Hz)</th><th>Width (Hz)</th><th>Power (dBm)</th></tr>'
    )
    # width
    us_width_all_path = graph_path + 'upstream_all_width' + img_ext
    us_width_all_cmd = str(
        'rrdtool graph ' + us_width_all_path + ' -a PNG ' +
        '--width 800 --height 400 --title "Width" ' +
        '--vertical-label "Hz" --disable-rrdtool-tag '
    )
    # power
    us_power_all_path = graph_path + 'upstream_all_power' + img_ext
    us_power_all_cmd = str(
        'rrdtool graph ' + us_power_all_path + ' -a PNG ' +
        '--width 800 --height 400 --title "Power" ' +
        '--vertical-label "dBm" --disable-rrdtool-tag '
    )
    for us_channel in channels['upstream']:
        # NOTE(review): loop-invariant — could be hoisted above the loop.
        channel_count = len(channels['upstream'])
        frequency = us_channel['frequency']
        width = us_channel['width']
        power = us_channel['power']
        us_ch_path = us_path + frequency + db_ext
        if (not os.path.exists(us_ch_path)):
            os.system(
                'rrdtool create ' + us_ch_path + ' ' +
                '--start N --step 300 ' +
                'DS:width:GAUGE:600:U:U ' +
                'DS:power:GAUGE:600:U:U ' +
                'RRA:AVERAGE:0.5:1:1440'
            )
        os.system(
            'rrdtool update ' + us_ch_path + ' ' +
            'N:' + width + ':' + power
        )
        # width
        width_graph_path = graph_path + 'upstream_' + frequency + '_width' + img_ext
        os.system(
            'rrdtool graph ' + width_graph_path + ' -a PNG --title="' + frequency + ' Hz" ' +
            '--vertical-label "Hz" --disable-rrdtool-tag ' +
            'DEF:width=' + us_ch_path + ':width:AVERAGE ' +
            'LINE1:width#ff0000:Width'
        )
        us_width_all_cmd = us_width_all_cmd + str(
            'DEF:' + frequency + '=' + us_ch_path + ':width:AVERAGE ' +
            'LINE1:' + frequency + '#ff0000:' + frequency + 'Hz '
        )
        # power
        power_graph_path = graph_path + 'upstream_' + frequency + '_power' + img_ext
        os.system(
            'rrdtool graph ' + power_graph_path + ' -a PNG --title="' + frequency + ' Hz" ' +
            '--vertical-label "dBm" --disable-rrdtool-tag ' +
            'DEF:power=' + us_ch_path + ':power:AVERAGE ' +
            'LINE1:power#ff0000:Power'
        )
        us_power_all_cmd = us_power_all_cmd + str(
            'DEF:' + frequency + '=' + us_ch_path + ':power:AVERAGE ' +
            'LINE1:' + frequency + '#ff0000:' + frequency + 'Hz '
        )
        # power — acceptable window depends on how many upstream channels
        # are bonded.
        if (channel_count == 1):
            lower_power_limit = 45
            upper_power_limit = 61
        elif (channel_count == 2):
            lower_power_limit = 45
            upper_power_limit = 54
        else:
            lower_power_limit = 45
            upper_power_limit = 51
        if ((float(power) > lower_power_limit) and (float(power) < upper_power_limit)):
            power_style = ' style="background-color:#00FF00"'
        else:
            power_style = ' style="background-color:#FF0000"'
        index_page_us_summary_contents = index_page_us_summary_contents + str(
            '<tr>' +
            '<td><a href="upstream_' + frequency + '.html">' + frequency + '</a></td>' +
            '<td>' + width + '</td>' +
            '<td' + power_style + '>' + power + '</td>' +
            '</tr>'
        )
        # Per-channel detail page.
        ch_page_contents = str(
            '<html><head><title>Upstream Channel Details (' + frequency + ' Hz)</title></head><body>' +
            '<h2>Upstream Channel Details (' + frequency + ' Hz)</h2>' +
            '<h3>Last Update</h3>' +
            '<p>' + current_time + '</p>' +
            '<h3>Upstream Channel Summary</h3>' +
            '<table border="1">' +
            '<tr>' +
            '<th>Frequency (Hz)</th>' +
            '<th>Width (Hz)</th>' +
            '<th>Lower Power Limit (dBm)</th>' +
            '<th>Actual Power (dBm)</th>' +
            '<th>Upper Power Limit (dBm)</th>' +
            '</tr>' +
            '<tr>' +
            '<td>' + frequency + '</td>' +
            '<td>' + width + '</td>' +
            '<td>' + str(lower_power_limit) + '</td>' +
            '<td' + power_style + '>' + power + '</td>' +
            '<td>' + str(upper_power_limit) + '</td>' +
            '</tr>' +
            '</table>' +
            '<h3>Upstream Channel Graphs</h3>' +
            '<img src="upstream_' + frequency + '_width.png"/><br/><br/>' +
            '<img src="upstream_' + frequency + '_power.png"/>' +
            '</body></html>'
        )
        with open(web_path + 'upstream_' + frequency + '.html', 'w') as f:
            f.write(ch_page_contents)
    # width
    os.system(us_width_all_cmd)
    # power
    os.system(us_power_all_cmd)
    index_page_us_summary_contents = index_page_us_summary_contents + str(
        '</table>'
    )
    # Assemble and write the final index page.
    index_contents = index_contents + str(
        index_page_ds_summary_contents +
        index_page_us_summary_contents +
        '<h3>Downstream Channels Graphs</h3>' +
        '<img src="downstream_all_power.png"/><br/><br/>' +
        '<img src="downstream_all_snr.png"/><br/><br/>' +
        '<img src="downstream_all_corrected.png"/><br/><br/>' +
        '<img src="downstream_all_uncorrected.png"/>' +
        '<h3>Upstream Channels Graphs</h3>' +
        '<img src="upstream_all_width.png"/><br/><br/>' +
        '<img src="upstream_all_power.png"/>' +
        '</body></html>'
    )
    with open(web_path + 'index.html', 'w') as f:
        f.write(index_contents)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1757195 | # -*- coding: utf-8 -*-
from django import forms
from .models import File
class FileForm(forms.ModelForm):
    """ModelForm exposing every field of the ``File`` model.

    ``link_type`` is rendered as inline radio buttons instead of the
    default select widget.
    """
    class Meta:
        model = File
        fields = '__all__'
        widgets = {
            'link_type': forms.RadioSelect(attrs={'class': 'inline-block'}),
        }
| StarcoderdataPython |
1710314 | from flask import render_template,request,Blueprint
from flask_fp.models import Post
# Blueprint grouping the app's core routes (home feed and about page).
main = Blueprint('main', __name__)
@main.route("/")
@main.route("/home")
def home():
    """Render the paginated home feed, newest posts first, 5 per page.

    The page number comes from the ``?page=`` query parameter (default 1).
    """
    page = request.args.get('page', 1, type = int)
    posts = Post.query.order_by(Post.date_posted.desc()).paginate(page = page, per_page=5)
    return render_template('HomePage.html', posts= posts)
@main.route("/about")
def about():
    """Render the static about page."""
    return render_template('AboutPage.html', title = 'About')
| StarcoderdataPython |
4801970 |
import smartpy as sp
class FA12(sp.Contract):
def __init__(self, admin):
self.init(paused = False, balances = sp.big_map(tvalue = sp.TRecord(approvals = sp.TMap(sp.TAddress, sp.TNat), balance = sp.TNat)), administrator = admin, totalSupply = 0)
@sp.entry_point
def transfer(self, params):
sp.verify((sp.sender == self.data.administrator) |
(~self.data.paused &
((params.fro == sp.sender) |
(self.data.balances[params.fro].approvals[sp.sender] >= params.value))))
self.addAddressIfNecessary(params.to)
sp.verify(self.data.balances[params.fro].balance >= params.value)
self.data.balances[params.fro].balance = sp.as_nat(self.data.balances[params.fro].balance - params.value)
self.data.balances[params.to].balance += params.value
sp.if (params.fro != sp.sender) & (self.data.administrator != sp.sender):
self.data.balances[params.fro].approvals[sp.sender] = sp.as_nat(self.data.balances[params.fro].approvals[sp.sender] - params.value)
@sp.entry_point
def approve(self, params):
sp.verify((sp.sender == self.data.administrator) |
(~self.data.paused & (params.f == sp.sender)))
alreadyApproved = self.data.balances[params.f].approvals.get(params.t, 0)
sp.verify((alreadyApproved == 0) | (params.amount == 0))
self.data.balances[params.f].approvals[params.t] = params.amount
@sp.entry_point
def setPause(self, params):
sp.verify(sp.sender == self.data.administrator)
self.data.paused = params
@sp.entry_point
def setAdministrator(self, params):
sp.verify(sp.sender == self.data.administrator)
self.data.administrator = params
@sp.entry_point
def mint(self, params):
sp.verify(sp.sender == self.data.administrator)
self.addAddressIfNecessary(params.to)
self.data.balances[params.to].balance += params.value
self.data.totalSupply += params.value
@sp.entry_point
def burn(self, params):
sp.verify(sp.sender == self.data.administrator)
sp.verify(self.data.balances[params.address].balance >= params.amount)
self.data.balances[params.address].balance = sp.as_nat(self.data.balances[params.address].balance - params.amount)
self.data.totalSupply = sp.as_nat(self.data.totalSupply - params.amount)
def addAddressIfNecessary(self, address):
sp.if ~ self.data.balances.contains(address):
self.data.balances[address] = sp.record(balance = 0, approvals = {})
@sp.entry_point
def getBalance(self, params):
sp.transfer(self.data.balances[params.owner].balance, sp.tez(0), sp.contract(sp.TNat, params.target).open_some())
@sp.entry_point
def getAllowance(self, params):
    """Send the owner->spender allowance to the nat callback `params.target`."""
    sp.transfer(self.data.balances[params.arg.owner].approvals[params.arg.spender], sp.tez(0), sp.contract(sp.TNat, params.target).open_some())
@sp.entry_point
def getTotalSupply(self, params):
    """Send the current total supply to the nat callback `params.target`."""
    sp.transfer(self.data.totalSupply, sp.tez(0), sp.contract(sp.TNat, params.target).open_some())
@sp.entry_point
def getAdministrator(self, params):
    """Send the administrator address to the address callback `params.target`."""
    sp.transfer(self.data.administrator, sp.tez(0), sp.contract(sp.TAddress, params.target).open_some())
@sp.entry_point
def crzy(self, params):
    """Demo entry point: requires at least 5 tez attached and that the
    attached amount equals `params` interpreted as whole tez.
    """
    sp.verify (sp.amount >= sp.tez(5))
    sp.verify (sp.tez(sp.nat(params)) == sp.amount)
class SWAP(sp.Contract):
    """Atomic swap: sells `tk_amount` FA1.2 tokens (moved via this
    contract's allowance on `fa12`) against `tz_amount` mutez.
    """
    def __init__(self, admin, interested_party, fa12, tk_amount, tz_amount):
        # tz_amount is given in mutez, tk_amount in token units (nat).
        self.init(
            admin = admin,
            fa12 = fa12,
            interested_party = interested_party,
            tz_amount = sp.mutez(tz_amount),
            tk_amount = sp.nat(tk_amount),
            immutable = sp.bool(False)
        )
    @sp.entry_point
    def delegate(self, params):
        """Admin-only: set the contract's baker delegate."""
        sp.verify(sp.sender == self.data.admin)
        sp.set_delegate(params.addr)
    @sp.entry_point
    def claim(self, params):
        """Counterparty withdraws the swapped tokens to address `params`."""
        sp.verify(sp.sender == self.data.interested_party)
        self.transfer(params)
    @sp.entry_point
    def retrieve(self, params):
        """Admin cancels the swap (only while no tez are held) and locks it."""
        sp.verify((sp.balance == sp.tez(0)) & (sp.sender == self.data.admin))
        self.transfer(params)
        self.data.immutable = True
    @sp.entry_point
    def withdraw(self, params):
        """Admin-only: send `params.amount` mutez to `params.to`."""
        sp.verify(sp.sender == self.data.admin)
        sp.send(params.to, sp.mutez(params.amount))
    @sp.entry_point
    def interest(self, params):
        """A buyer fills the order by sending at least `tz_amount`; the
        swap becomes immutable and the sender becomes the claimant."""
        sp.verify(sp.amount >= self.data.tz_amount)
        sp.verify(sp.amount == sp.mutez(params))
        sp.verify(self.data.immutable == sp.bool(False))
        self.data.immutable = sp.bool(True)
        self.data.interested_party = sp.sender
    def transfer(self, params):
        """Helper: move `tk_amount` FA1.2 tokens from this contract to `params`."""
        arg = sp.TRecord(fro = sp.TAddress, to = sp.TAddress, value = sp.TNat)
        arg_inst = sp.record(fro = sp.to_address(sp.self), to = params, value = self.data.tk_amount)
        c = sp.contract(arg, self.data.fa12, entry_point="transfer").open_some()
        sp.transfer(arg_inst, sp.mutez(0), c)
@sp.add_test(name = "SWAP Tests")
def test():
    """Scenario: mint FA1.2 tokens, open a swap, fill it and claim."""
    scenario = sp.test_scenario()
    scenario.h1("FA1.2 Atomic Swap")
    scenario.h3("Test wallets")
    addr1 = sp.test_account("test1")
    addr2 = sp.test_account("test2")
    scenario.show([addr1, addr2])
    scenario.h3("Initialize FA12")
    c0 = FA12(addr1.address)
    scenario.show([c0.address])
    scenario += c0
    scenario.h3("Mint")
    scenario += c0.mint(to=addr1.address, value=20000).run(sender=addr1)
    scenario.h3("Token owner initialize an atomic swap")
    # 200 tokens against 200 tez (200_000_000 mutez).
    c2 = SWAP(addr1.address, addr1.address, c0.address, 200, 200000000)
    scenario.show([c2.address])
    scenario += c2
    scenario.h3("Token owner gives permissions to SWAP smart contract")
    scenario += c0.transfer(fro=addr1.address, to=c2.address, value=200).run(sender=addr1)
    #scenario.h3("Test Retrieve")
    #scenario += c2.retrieve(addr1.address).run(sender=addr1)
    scenario.h3("An user fills the Swap Order")
    scenario += c2.interest(200000000).run(sender=addr2, amount=sp.mutez(200000000))
    scenario.h3("The very same user can manage those FA1.2 tokes through the Swap Contract")
    scenario += c2.claim(addr2.address).run(sender=addr2)
    scenario += c0
    scenario.h3("Initial party withdraw funds")
    scenario += c2.withdraw(to=addr1.address, amount=200000000).run(sender=addr1)
1616243 | import os
import sys
import datetime
import torch
sys.path.append('../')
from models.model import *
from core.tune_labels import tune_labels
from utils.utils import get_data_loader, get_data_loader_weight, init_model, init_random_seed, get_dataset_root, get_model_root, get_data
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import shutil
import random
import numpy as np
from contextlib import redirect_stdout
# Human-readable description of each dataset-shift experiment id.
data_mode_verbosity={
0: "MNIST->USPS",
1: "MNIST->USPS[0-4]",
2: "MNIST->USPS[5-9]",
3: "MNIST->USPS with mild shift",
4: "MNIST->USPS with strong shift"
}
# Run the source-only tuning experiment for each dataset-shift setting.
for data_mode in [0,1,2]:
    model_name = "mnist-usps-tune-source-only"
    dataset_root = get_dataset_root()
    # One timestamped output directory per run.
    model_root = os.path.expanduser(os.path.join('runs', model_name, data_mode_verbosity[data_mode]))
    model_root = os.path.join(model_root, datetime.datetime.now().strftime('%m%d_%H%M%S'))
    os.makedirs(model_root, exist_ok=True)
    logname = model_root + '/log.txt'
    class Logger(object):
        """Tee stdout to both the terminal and the run's log file."""
        def __init__(self):
            self.terminal = sys.stdout
            self.log = open(logname, "a")
        def write(self, message):
            self.terminal.write(message)
            self.log.write(message)
        def flush(self):
            #this flush method is needed for python 3 compatibility.
            #this handles the flush command by doing nothing.
            #you might want to specify some extra behavior here.
            pass
    sys.stdout = Logger()
    class Config(object):
        """Hyper-parameters and paths for this run; dumped to config.txt."""
        # params for path
        dataset_root = dataset_root
        model_root = model_root
        config = os.path.join(model_root, 'config.txt')
        finetune_flag = False
        optimal = False
        target_train_subsample_size = 50
        data_mode=data_mode
        # params for datasets and data loader
        batch_size = 20
        # params for source dataset
        src_dataset = "mnist"
        src_classifier_restore = os.path.join(model_root, src_dataset + '-source-classifier-final.pt')
        class_num_src = 31
        # params for target dataset
        restore_root=os.path.expanduser(os.path.join('runs', 'mnist-source-only'))
        tgt_dataset = "usps"
        dann_restore = os.path.join(restore_root,'0807_201604/mnist_ training-mnist-final.pt')
        # params for pretrain
        num_epochs_src = 100
        log_step_src = 10
        save_step_src = 50
        eval_step_src = 20
        # params for training dann
        gpu_id = '0'
        ## for digit
        num_epochs = 560
        log_step = 1
        save_step = 25
        eval_step = 1
        ## for office
        # num_epochs = 1000
        # log_step = 10 # iters
        # save_step = 500
        # eval_step = 5 # epochs
        lr_adjust_flag = 'simple'
        src_only_flag = False
        manual_seed = 8888
        alpha = 0
        # params for optimizing models
        lr = 1e-4
        momentum = 0
        weight_decay = 0
        def __init__(self):
            # Dump every public attribute so the run is reproducible.
            public_props = (name for name in dir(self) if not name.startswith('_'))
            with open(self.config, 'w') as f:
                for name in public_props:
                    f.write(name + ': ' + str(getattr(self, name)) + '\n')
    params = Config()
    logger = SummaryWriter(params.model_root)
    # init random seed
    init_random_seed(params.manual_seed)
    # init device
    device = torch.device("cuda:" + params.gpu_id)
    # Class-conditional sampling weights implementing the label shift.
    source_weight, target_weight = get_data(params.data_mode)
    src_data_loader, num_src_train = get_data_loader_weight(
        params.src_dataset, params.dataset_root, params.batch_size, train=True, weights = source_weight)
    src_data_loader_eval, _ = get_data_loader_weight(
        params.src_dataset, params.dataset_root, params.batch_size, train=False, weights = source_weight)
    tgt_data_loader, num_tgt_train = get_data_loader_weight(
        params.tgt_dataset, params.dataset_root, params.batch_size,
        train=True, subsample_size = params.target_train_subsample_size, weights = target_weight)
    tgt_data_loader_eval, _ = get_data_loader_weight(
        params.tgt_dataset, params.dataset_root, params.batch_size,
        train=False, weights = target_weight)
    # Cannot use the same sampler for both training and testing dataset
    # load dann model
    dann = init_model(net=MNISTmodel(), restore=params.dann_restore).to(device)
    # freeze model but last layer
    for param in dann.parameters():
        param.requires_grad = False
    # Replace the classification head: 10 target classes, trained from scratch.
    dann.classifier[6] = nn.Linear(100, 10)
    dann=dann.to(device)
    # train dann model
    print("Tuning mnist-source-only model")
    dann = tune_labels(dann, params,src_data_loader,src_data_loader_eval, tgt_data_loader, tgt_data_loader_eval, num_tgt_train, device, logger)
| StarcoderdataPython |
1633404 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from datetime import time
from itertools import chain
from pandas import Timestamp
from pandas.tseries.holiday import AbstractHolidayCalendar, GoodFriday, USLaborDay, USPresidentsDay, USThanksgivingDay
from pytz import timezone
from .holidays_us import (Christmas, ChristmasEveBefore1993, ChristmasEveInOrAfter1993, USBlackFridayInOrAfter1993,
USIndependenceDay, USMartinLutherKingJrAfter1998, USMemorialDay, USJuneteenthAfter2022,
USNationalDaysofMourning, USNewYearsDay)
from .market_calendar import MarketCalendar
class CMEBaseExchangeCalendar(MarketCalendar, ABC):
    """
    Base Exchange Calendar for CME.
    CME Markets: https://www.cmegroup.com/markets/agriculture.html#overview
    - Agriculture
    - Energy
    - Equity Index
    - FX
    - Interest Rates
    - Metals
    - Options
    Holidays for which entire GLOBEX is closed:
    - New Years Day
    - Good Friday
    - Christmas
    Product Specific Closures:
    - MLK Day
    - Presidents Day
    - Memorial Day
    - Juneteenth
    - US Independence Day
    - US Labor Day
    - US Thanksgiving Day
    """
    @property
    @abstractmethod
    def name(self):
        """
        Name of the market
        :return: string name
        """
        raise NotImplementedError()
    @property
    def tz(self):
        # All CME GLOBEX products trade on Chicago time.
        return timezone('America/Chicago')
    @property
    def regular_holidays(self):
        # Full-closure holidays common to all GLOBEX products.
        return AbstractHolidayCalendar(rules=[
            USNewYearsDay,
            GoodFriday,
            Christmas,
        ])
    # I can't find any reference to these special closings onther than NYSE
    # @property
    # def adhoc_holidays(self):
    #     return USNationalDaysofMourning
    @property
    def special_closes(self):
        # Early closes at the product-specific `special_close_time`,
        # which concrete subclasses must define.
        return [(
            self.special_close_time,
            AbstractHolidayCalendar(rules=[
                USMartinLutherKingJrAfter1998,
                USPresidentsDay,
                USMemorialDay,
                USJuneteenthAfter2022,
                USLaborDay,
                USIndependenceDay,
                USThanksgivingDay,
                USBlackFridayInOrAfter1993,
                ChristmasEveBefore1993,
                ChristmasEveInOrAfter1993,
            ])
        )]
class CMEAgricultureExchangeCalendar(CMEBaseExchangeCalendar):
    """
    Exchange calendar for CME for Agriculture products
    Products:
    - Grains and Oilseeds (same trading hours and holidays)
    - Livestock
    - Dairy
    - Fertilizer
    - Lumber and Softs
    """
    # Intermediate base: concrete product calendars subclass this.
    # aliases = ['CME_Agriculture', 'CBOT_Agriculture', 'COMEX_Agriculture', 'NYMEX_Agriculture']
    @property
    #@abstractmethod #Would have prefered to keep this class abstract but it fails test_market_calendar.py
    def name(self):
        """
        Name of the market
        :return: string name
        """
        raise NotImplementedError()
class CMELivestockExchangeCalendar(CMEAgricultureExchangeCalendar):
    """
    Exchange calendar for CME for Livestock products
    https://www.cmegroup.com/trading/agricultural/livestock.html
    GLOBEX Trading Times
    https://www.cmegroup.com/markets/agriculture/livestock/live-cattle.contractSpecs.html
    Monday - Friday: 8:30 a.m. - 1:05 p.m. CT
    """
    aliases = ['CME_Livestock', 'CME_Live_Cattle', 'CME_Feeder_Cattle', 'CME_Lean_Hog', 'CME_Port_Cutout']
    # Regular session: 08:30-13:05 America/Chicago.
    regular_market_times = {
        "market_open": ((None, time(8, 30)),),
        "market_close": ((None, time(13, 5)),)
    }
    @property
    def name(self):
        return "CME_Livestock"
    @property
    def regular_holidays(self):
        # Livestock is fully closed on the product-specific US holidays
        # (unlike the base GLOBEX calendar, which only closes early).
        return AbstractHolidayCalendar(rules=[
            USNewYearsDay,
            USMartinLutherKingJrAfter1998,
            USPresidentsDay,
            GoodFriday,
            USMemorialDay,
            USIndependenceDay,
            USLaborDay,
            USThanksgivingDay,
            Christmas,
        ])
    # @property
    # def adhoc_holidays(self):
    #     return USNationalDaysofMourning
    @property
    def special_closes(self):
        # Early close at 12:05 on Black Friday and Christmas Eve.
        return [(
            time(12, 5),
            AbstractHolidayCalendar(rules=[
                USBlackFridayInOrAfter1993,
                ChristmasEveBefore1993,
                ChristmasEveInOrAfter1993,
            ])
        )]
class CMEEquityExchangeCalendar(CMEBaseExchangeCalendar):
    """CME equity-index calendar (e.g. E-mini S&P 500) on GLOBEX hours.

    The trading day runs from 17:00 the previous calendar day to 16:00
    America/Chicago; early-close days (from the base class) end at 12:30.
    See https://www.cmegroup.com/markets/equities/sp/e-mini-sandp500.contractSpecs.html
    """
    aliases = ['CME_Equity', 'CBOT_Equity', '/ES', 'S&P500']
    regular_market_times = {
        # Session opens the evening before the trade date (-1 day offset).
        "market_open": ((None, time(17), -1),),
        "market_close": ((None, time(16, 00)),),
    }

    @property
    def name(self):
        return "CME_Equity"

    @property
    def special_close_time(self):
        # Early-close time used by CMEBaseExchangeCalendar.special_closes.
        return time(12, 30)
# For the bond market Good Friday that coincides with the release of NFP on the first friday of the month is an open day
# Good Fridays on which the bond market is fully closed (no NFP release).
goodFridayClosed = ['1970-03-27', '1971-04-09', '1972-03-31', '1973-04-20', '1974-04-12', '1975-03-28', '1976-04-16',
                    '1977-04-08', '1978-03-24', '1979-04-13', '1981-04-17', '1982-04-09', '1984-04-20', '1986-03-28',
                    '1987-04-17', '1989-03-24', '1990-04-13', '1991-03-29', '1992-04-17', '1993-04-09', '1995-04-14',
                    '1997-03-28', '1998-04-10', '2000-04-21', '2001-04-13', '2002-03-29', '2003-04-18', '2004-04-09',
                    '2005-03-25', '2006-04-14', '2008-03-21', '2009-04-10', '2011-04-22', '2013-03-29', '2014-04-18',
                    '2016-03-25', '2017-04-14', '2018-03-30', '2019-04-19', '2020-04-10', '2022-04-15', '2024-03-29',
                    '2025-04-18', '2027-03-26', '2028-04-14', '2029-03-30', '2030-04-19', '2031-04-11', '2032-03-26',
                    '2033-04-15', '2035-03-23', '2036-04-11', '2038-04-23', '2039-04-08', '2040-03-30', '2041-04-19',
                    '2043-03-27', '2044-04-15', '2046-03-23', '2047-04-12', '2049-04-16', '2050-04-08', '2051-03-31',
                    '2052-04-19', '2054-03-27', '2055-04-16', '2056-03-31', '2057-04-20', '2058-04-12', '2059-03-28',
                    '2060-04-16', '2061-04-08', '2062-03-24', '2063-04-13', '2065-03-27', '2066-04-09', '2068-04-20',
                    '2069-04-12', '2070-03-28', '2071-04-17', '2072-04-08', '2073-03-24', '2074-04-13', '2076-04-17',
                    '2077-04-09', '2079-04-21', '2081-03-28', '2082-04-17', '2084-03-24', '2085-04-13', '2086-03-29',
                    '2087-04-18', '2088-04-09', '2090-04-14', '2092-03-28', '2093-04-10', '2095-04-22', '2096-04-13',
                    '2097-03-29', '2098-04-18', '2099-04-10']
BondsGoodFridayClosed = [Timestamp(x, tz='UTC') for x in goodFridayClosed]
# Good Fridays that coincide with the NFP release: bonds trade a shortened session.
goodFridayOpen = ['1980-04-04', '1983-04-01', '1985-04-05', '1988-04-01', '1994-04-01', '1996-04-05', '1999-04-02',
                  '2007-04-06', '2010-04-02', '2012-04-06', '2015-04-03', '2021-04-02', '2023-04-07', '2026-04-03',
                  '2034-04-07', '2037-04-03', '2042-04-04', '2045-04-07', '2048-04-03', '2053-04-04', '2064-04-04',
                  '2067-04-01', '2075-04-05', '2078-04-01', '2080-04-05', '2083-04-02', '2089-04-01', '2091-04-06',
                  '2094-04-02']
BondsGoodFridayOpen = [Timestamp(x, tz='UTC') for x in goodFridayOpen]
class CMEBondExchangeCalendar(MarketCalendar):
    """
    Exchange calendar for CME for Interest Rate and Bond products
    The Holiday calendar is different between the open outcry trading floor hours and GLOBEX electronic trading hours.
    This calendar attempts to be accurate for the GLOBEX holidays and hours from approx 2010 onward.
    """
    aliases = ['CME_Rate', 'CBOT_Rate', 'CME_InterestRate', 'CBOT_InterestRate', 'CME_Bond', 'CBOT_Bond']
    regular_market_times = {
        # Session opens at 17:00 the previous calendar day (-1 day offset).
        "market_open": ((None, time(17), -1),),
        "market_close": ((None, time(16)),)
    }
    @property
    def name(self):
        return "CME_Bond"
    @property
    def tz(self):
        return timezone('America/Chicago')
    @property
    def regular_holidays(self):
        # Good Friday is handled ad hoc below: bonds stay open for a
        # shortened session when it coincides with the NFP release.
        return AbstractHolidayCalendar(rules=[
            USNewYearsDay,
            Christmas,
        ])
    @property
    def adhoc_holidays(self):
        return list(chain(USNationalDaysofMourning, BondsGoodFridayClosed))
    @property
    def special_closes(self):
        # 12:00 early closes on the product-specific US holidays;
        # 12:15 on Black Friday and Christmas Eve.
        return [
            (time(12),
             AbstractHolidayCalendar(rules=[
                 USMartinLutherKingJrAfter1998,
                 USPresidentsDay,
                 USMemorialDay,
                 USIndependenceDay,
                 USLaborDay,
                 USThanksgivingDay,
             ])),
            (time(12, 15),
             AbstractHolidayCalendar(rules=[
                 USBlackFridayInOrAfter1993,
                 ChristmasEveBefore1993,
                 ChristmasEveInOrAfter1993,
             ]))
        ]
    @property
    def special_closes_adhoc(self):
        # Good Fridays with an NFP release: open, but close early at 10:00.
        return [
            (time(10, tzinfo=self.tz), BondsGoodFridayOpen)
        ]
| StarcoderdataPython |
1601114 | # coding=utf8
#
# (C) 2015-2016, MIT License
'''
Container for tests.
'''
| StarcoderdataPython |
177212 | <filename>python/day25.py
import io
# Sample sea-cucumber map from the puzzle statement (expected answer: 58).
EXAMPLE = """v...>>.vv>
.vv>>.vv..
>>.>v>...v
>>v>>.>.v.
v>v.vv.v..
>.>>..v...
.vv..>.>v.
v.v..>>v.v
....v..v.>"""
def step(map: list[list[str]]) -> bool:
    """Advance the sea-cucumber grid one step (AoC 2021, day 25).

    East-facing '>' herds move first, then south-facing 'v' herds, each
    moving simultaneously with wrap-around. Mutates `map` in place and
    returns True if anything moved.
    """
    rows, cols = len(map), len(map[0])
    east = {
        (r, c)
        for r in range(rows)
        for c in range(cols)
        if map[r][c] == ">" and map[r][(c + 1) % cols] == "."
    }
    for r, c in east:
        map[r][c] = "."
        map[r][(c + 1) % cols] = ">"
    south = {
        (r, c)
        for r in range(rows)
        for c in range(cols)
        if map[r][c] == "v" and map[(r + 1) % rows][c] == "."
    }
    for r, c in south:
        map[r][c] = "."
        map[(r + 1) % rows][c] = "v"
    return bool(east) or bool(south)
def solve(reader: io.TextIOBase) -> int:
    """Count the steps (inclusive) until no sea cucumber can move."""
    grid = [list(line.strip()) for line in reader.readlines()]
    steps = 1
    while step(grid):
        steps += 1
    return steps
assert solve(io.StringIO(EXAMPLE)) == 58
def main():
    """Solve the puzzle for the checked-in input file and print the answer."""
    with open("input/day25.txt") as fh:
        answer = solve(fh)
    print(f"On step {answer} nobody moves")
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3252512 | import io
import re
from setuptools import setup
from collections import OrderedDict
# Read the long description and extract the version string from the
# package without importing it.
with io.open('README.md', 'rt', encoding='utf8') as f:
    readme = f.read()
with io.open('TimePoints/__init__.py', 'rt', encoding='utf8') as f:
    version = re.search(r'__version__ = \'(.*?)\'', f.read()).group(1)
setup(
    name='TimePoints',
    version=version,
    url='https://github.com/htarnacki/TimePoints',
    project_urls=OrderedDict((
        ('Code', 'https://github.com/htarnacki/TimePoints'),
        ('Issue tracker', 'https://github.com/htarnacki/TimePoints/issues'),
    )),
    description='Easily measure durations between time points in a code',
    long_description=readme,
    long_description_content_type='text/markdown',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    packages=['TimePoints'],
    keywords=['time', 'duration', 'time points', 'measure', 'debug'],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.7',
        'Topic :: System :: Monitoring',
        'Topic :: System :: Logging',
        'Topic :: Software Development :: Debuggers'
    ],
    python_requires='>=3.7.0',
    extras_require=dict(
        reports=[
            'rich'
        ]
    )
)
| StarcoderdataPython |
3342389 | <filename>cql_builder/assignment.py
from cql_builder.base import Assignment, ValidationError
# {key=value, key=value, ...}
class Set(Assignment):
    """Simple SET of column=value pairs taken from keyword arguments."""
    def __init__(self, **kwargs):
        self.kwargs = kwargs
    @property
    def cql(self):
        placeholders = ['{}=%s'.format(column) for column in self.kwargs.keys()]
        return ', '.join(placeholders)
    @property
    def values(self):
        return self.kwargs.values()
# names['foo'] = 'bar'
# names[2] = 'foo'
class SetAt(Assignment):
    """Assignment to a single key/index of a collection column."""
    def __init__(self, name, key, value):
        self.name = name
        self.key = key
        self.value = value
    @property
    def cql(self):
        return self.name + '[%s] = %s'
    @property
    def values(self):
        return [self.key, self.value]
# name = name + {value, value, ...}
# name = name + [value, value, ...]
class Add(Assignment):
    """Append/union `value` onto collection column `name`."""
    def __init__(self, name, value):
        self.name = name
        self.value = value
    @property
    def cql(self):
        return '{0}={0} + %s'.format(self.name)
    @property
    def values(self):
        return [self.value]
# name = name - {value, value, ...}
# name = name - [value, value, ...]
class Subtract(Assignment):
    """Remove `value` from collection column `name`."""
    def __init__(self, name, value):
        self.name = name
        self.value = value
    @property
    def cql(self):
        return '{0}={0} - %s'.format(self.name)
    @property
    def values(self):
        return [self.value]
# assignment, assignment, ...
class Assignments(Assignment):
    """Ordered collection of Assignment objects rendered as one clause."""
    def __init__(self):
        self.assignments = []
    def add(self, *assignment):
        """Append one or more assignments, preserving insertion order."""
        self.assignments.extend(assignment)
    @property
    def cql(self):
        return ', '.join(item.cql for item in self.assignments)
    @property
    def values(self):
        collected = []
        for item in self.assignments:
            collected.extend(item.values)
        return collected
    def validate(self):
        """Raise ValidationError unless every entry is a non-None Assignment."""
        if not self.assignments:
            raise ValidationError('assignments is empty')
        for item in self.assignments:
            if item is None:
                raise ValidationError('assignment: {}'.format(item))
            if not isinstance(item, Assignment):
                raise ValidationError('assignment {!r} must be of type Assignment'.format(item))
| StarcoderdataPython |
1783211 | from urls import app
| StarcoderdataPython |
3352639 | <filename>pacote-download/ex112/utilidadescev/teste.py
from ex112.utilidadescev import dado
from ex112.utilidadescev import moeda
# Read a price from the user, then print a formatted summary applying a
# 35% raise and a 22% discount via the course's utility modules.
p = dado.leiaDinheiro('Digite o preco: R$')
moeda.resumo(p,35,22)
136279 | #!/usr/bin/env python
'''
This script prints Hello World!
'''
# Classic smoke-test script: emit the greeting on stdout.
print('Hello, World!')
| StarcoderdataPython |
3251663 | from fvh import MyTurtle
import math
def one(starth=270, startpos=(0,0), lm=None, cube=[60,60]):
    """Draw the digit '1' as a segment-style outline with turtle moves.

    starth/startpos set the initial heading and position; lm is an
    existing MyTurtle to draw with (a fresh one is created when None).
    NOTE(review): `cube` is accepted but never used, and its mutable
    list default is shared across calls — kept only for signature
    compatibility; confirm before removing.
    """
    if not lm:
        lm=MyTurtle()
    lm.tracer(False)
    lm.pu()
    lm.goto(startpos)
    lm.seth(starth)
    lm.pd()
    lm.ht()
    lm.fd(5)
    lm.right(90)
    lm.fd(10)
    lm.left(90)
    lm.fd(40)
    lm.left(90)
    lm.fd(10)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(30)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(10)
    lm.left(90)
    lm.fd(40)
    lm.left(90)
    lm.fd(10)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(30)
    lm.tracer(True)
def two(starth=270, startpos=(0,0), lm=None):
    """Draw the digit '2': outer outline first, then the inner cut-out.

    starth/startpos set the initial heading and position; lm is an
    existing MyTurtle to draw with (a fresh one is created when None).
    """
    if not lm:
        lm=MyTurtle()
    lm.tracer(False)
    lm.pu()
    lm.goto(startpos)
    lm.seth(starth)
    lm.pd()
    lm.ht()
    # Outer outline.
    lm.fd(5)
    lm.right(90)
    lm.fd(10)
    lm.left(90)
    lm.fd(40)
    lm.left(90)
    lm.fd(10)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(60)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(10)
    lm.left(90)
    lm.fd(40)
    lm.left(90)
    lm.fd(10)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(60)
    # Inner rectangle (pen up while repositioning).
    lm.pu()
    lm.rt(180)
    lm.fd(20)
    lm.left(90)
    lm.fd(5)
    lm.pd()
    lm.fd(40)
    lm.right(90)
    lm.fd(20)
    lm.right(90)
    lm.fd(40)
    lm.right(90)
    lm.fd(20)
    lm.tracer(True)
def three(starth=270, startpos=(0,0), lm=None):
    """Draw the digit '3': wider outline plus two inner cut-outs.

    starth/startpos set the initial heading and position; lm is an
    existing MyTurtle to draw with (a fresh one is created when None).
    """
    if not lm:
        lm=MyTurtle()
    lm.tracer(False)
    lm.pu()
    lm.goto(startpos)
    lm.seth(starth)
    lm.pd()
    lm.ht()
    # Outer outline.
    lm.fd(5)
    lm.right(90)
    lm.fd(10)
    lm.left(90)
    lm.fd(40)
    lm.left(90)
    lm.fd(10)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(90)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(10)
    lm.left(90)
    lm.fd(40)
    lm.left(90)
    lm.fd(10)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(90)
    # First inner rectangle.
    lm.pu()
    lm.rt(180)
    lm.fd(20)
    lm.left(90)
    lm.fd(5)
    lm.pd()
    lm.fd(40)
    lm.right(90)
    lm.fd(20)
    lm.right(90)
    lm.fd(40)
    lm.right(90)
    lm.fd(20)
    # Second inner rectangle.
    lm.pu()
    lm.rt(180)
    lm.fd(30)
    lm.left(90)
    lm.pd()
    lm.fd(40)
    lm.right(90)
    lm.fd(20)
    lm.right(90)
    lm.fd(40)
    lm.right(90)
    lm.fd(20)
    lm.tracer(True)
def five(starth=270, startpos=(0,0), lm=None):
    """Draw the digit '5' (stylized 'V'): the slanted strokes are
    computed from right triangles via asin, relative to the heading
    recorded at the top of the glyph.
    """
    if not lm:
        lm=MyTurtle()
    lm.tracer(False)
    lm.pu()
    lm.goto(startpos)
    lm.seth(starth)
    lm.pd()
    lm.ht()
    lm.fd(5)
    lm.right(90)
    lm.fd(10)
    top0=lm.pos()
    topheading=lm.heading()
    # Angle of the outer diagonal: opposite 40, hypotenuse sqrt(15^2+40^2).
    theta=math.degrees(math.asin(40.0/((15.0**2+40.0**2)**0.5)))
    lm.seth(topheading+theta)
    lm.fd((15.0**2+40.0**2)**0.5)
    lm.seth(topheading-180)
    lm.fd(25)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(60)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(25)
    lm.seth(topheading-theta)
    lm.fd((15.0**2+40.0**2)**0.5)
    lm.seth(topheading)
    lm.fd(10)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(60)
    # Inner triangular cut-out.
    lm.pu()
    lm.right(180)
    lm.fd(20)
    lm.left(90)
    lm.fd(5)
    lm.pd()
    innertheta=math.degrees(math.asin(30/((10.0**2+30.0**2)**0.5)))
    lm.seth(topheading+innertheta)
    lm.fd((10.0**2+30.0**2)**0.5)
    lm.seth(topheading-innertheta)
    lm.fd((10.0**2+30.0**2)**0.5)
    lm.seth(topheading-180.0)
    lm.fd(20)
    lm.tracer(True)
def ten(starth=270, startpos=(0,0), lm=None):
    """Draw the digit/glyph 'X' (Roman ten): two crossing strokes whose
    diagonals are computed with asin relative to the top heading, plus
    two inner triangular cut-outs.
    """
    if not lm:
        lm=MyTurtle()
    lm.tracer(False)
    lm.pu()
    lm.goto(startpos)
    lm.seth(starth)
    lm.pd()
    lm.ht()
    lm.fd(5)
    lm.right(90)
    lm.fd(10)
    topheading=lm.heading()
    # Angle of the outer diagonal: opposite 25, hypotenuse sqrt(15^2+25^2).
    outtertheta=math.degrees(math.asin(25.0/((15.0**2+25.0**2)**0.5)))
    lm.seth(topheading+outtertheta) #top right
    lm.fd((15.0**2+25.0**2)**0.5)
    lm.seth(topheading-(180+outtertheta)) # middle right
    lm.fd((15.0**2+25.0**2)**0.5)
    lm.seth(topheading-180)
    lm.fd(10)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(60)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(10)
    lm.seth(topheading+(180+outtertheta)) # bottom left
    lm.fd((15.0**2+25.0**2)**0.5)
    lm.seth(topheading-outtertheta) # middle left
    lm.fd((15.0**2+25.0**2)**0.5)
    lm.seth(topheading)
    lm.fd(10)
    lm.right(90)
    lm.fd(5)
    lm.right(90)
    lm.fd(60)
    # Upper inner triangle.
    lm.pu()
    lm.right(180)
    lm.fd(20)
    lm.left(90)
    lm.fd(5)
    lm.right(90)
    lm.pd()
    lm.fd(20)
    lm.seth(180+(topheading-outtertheta))
    lm.fd((2.0/3.0)*((15.0**2+25.0**2)**0.5))
    lm.seth(topheading+(180+outtertheta))
    lm.fd((2.0/3.0)*((15.0**2+25.0**2)**0.5))
    # Lower inner triangle.
    lm.pu()
    lm.seth(90+topheading)
    lm.fd(50)
    lm.pd()
    lm.seth(topheading)
    lm.fd(20)
    lm.seth(topheading+(180+outtertheta))
    lm.fd((2.0/3.0)*((15.0**2+25.0**2)**0.5))
    lm.seth(180+(topheading-outtertheta))
    lm.fd((2.0/3.0)*((15.0**2+25.0**2)**0.5))
    lm.tracer(True)
def fifty(starth=270, startpos=(0,0), lm=None):
    """Draw the glyph 'L' (Roman fifty) as a single outline, finishing
    with a 45-degree diagonal of length sqrt(50).
    """
    if not lm:
        lm=MyTurtle()
    lm.ht()
    lm.tracer(False)
    lm.pu()
    lm.goto(startpos)
    lm.seth(starth)
    lm.fd(35)
    lm.pd()
    lm.fd(15)
    lm.right(90)
    lm.fd(30)
    lm.right(90)
    lm.fd(50)
    lm.right(90)
    lm.fd(10)
    lm.right(90)
    lm.fd(40)
    lm.left(90)
    lm.fd(15)
    lm.left(45)
    lm.fd(50**0.5)
    lm.tracer(True)
168494 | <gh_stars>0
import networkx as nx
from networkx.readwrite import json_graph
import pickle
from random import choice,uniform,randint
import http_server
import json
import math
from itertools import combinations
class Optimize:
G=None
F=None
def prune(self, G):
    """Remove nodes whose age or income distribution sums to zero.

    Fix: iterate the graph that was passed in rather than the
    Optimize.G class attribute — the original mixed the two, which only
    worked because the sole call site passed Optimize.G itself.
    """
    doomed = []
    for n in G.nodes():
        if sum(G.node[n]['alist']) == 0 or sum(G.node[n]['ilist']) == 0:
            doomed.append(n)
    # Delete after the scan so we never mutate while iterating.
    for n in doomed:
        G.remove_node(n)
def remove_isolated(self, G):
    """Drop every node of G that has no incident edges (degree zero)."""
    lonely = [n for n in G.nodes() if G.degree(n) == 0]
    for n in lonely:
        G.remove_node(n)
def makeallowed(self, allowedlist):
    """Append the whitelisted site names from allowed.txt (one per line).

    Fix: use a context manager so the file handle is closed; the
    original opened allowed.txt and never closed it.
    """
    with open('allowed.txt') as fh:
        for line in fh:
            allowedlist.append(line.strip())
def filterallowed(self,allowedlist,income,age):
tbd=[]
for gene in Optimize.G.nodes():
if str(gene) in allowedlist:
if self.minimuminc(gene,income)==False or self.ageconst(gene,age)==False:
tbd.append(str(gene))
for t in tbd:
allowedlist.remove(t)
for s in allowedlist:
print s
def allowed(self, allowedlist, site):
    """Return True when `site` (stringified) is on the whitelist."""
    # Idiom fix: return the membership test directly instead of the
    # if/else True/False ladder.
    return str(site) in allowedlist
def C(self, n):
    """Cost factor for site n, scaled linearly from its reach."""
    reach = Optimize.G.node[n]['reach']
    return (reach * 4.5) + 0.5
def D(self, i, j):
    """Audience-overlap factor between sites i and j: 10^(-distance),
    where distance comes from the precomputed Floyd-Warshall table F."""
    distance = Optimize.F.get(i).get(j)
    return math.pow(10, -distance)
# def U(n):
# world=3000000000
# return G.node[n]['reach']*(world/100)
def I(self, n, size):
    """Impressions bought on site n when the budget is split `size` ways."""
    Freq = Optimize.G.node[n]['pageviews']
    Freq = 1  # NOT TAKING PAGEVIEWS PER USER INTO ACCOUNT
    denominator = size * self.C(n) * Freq
    return (1000 * self.budget) / denominator
def fitness(self, S):
    """Score chromosome S: total impressions minus pairwise overlap.

    Returns (fitness, overlap). Fix: iterate every gene with range(L);
    the original started at index 1, silently dropping the first gene's
    impressions from the score — inconsistent with fitnesspath(), which
    uses range(0, L).
    """
    # S is a chromosome
    L = len(S)
    outsum = 0
    overlap = 0
    for i in range(L):
        insum = 0
        for j in range(i + 1, L):
            insum += self.D(S[i], S[j]) * min(self.I(S[i], L), self.I(S[j], L))
        outsum += self.I(S[i], L) - insum
        overlap += insum
    return (outsum, overlap)
def fitnesspath(self, S): #PATH
    """Path-based fitness: overlap is measured along shortest paths on
    the -log10(weight) graph, after removing paths wholly contained in
    another (dominated) path. Returns (fitness, overlap)."""
    #S is a chromosome
    L=len(S)
    outsum=0
    overlap=0
    for i in range(0,L):
        allpaths=[]
        for j in range(i+1,L):
            path=nx.shortest_path(Optimize.G, source=S[i], target=S[j], weight='neglog')
            allpaths.append(path)
        delpaths=[]
        # print "All", allpaths
        # A path whose joined node string is a substring of another's is
        # dominated and dropped from the overlap computation.
        for a, b in combinations(allpaths, 2):
            str1 = ''.join(a)
            str2 = ''.join(b)
            if str1 in str2:
                delpaths.append(b)
            elif str2 in str1:
                delpaths.append(a)
        for d in delpaths:
            if d in allpaths:
                allpaths.remove(d)
        # print "Del", delpaths
        insum=0
        for p in allpaths:
            l=len(p)
            # Overlap between the two endpoints of each surviving path.
            insum+= self.D(p[0],p[l-1])* min(self.I(p[0],L),self.I(p[l-1],L))
        outsum+=self.I(S[i],L)-insum
        overlap+=insum
    # print "self.fitness ", outsum, " Overlap ", insum
    return (outsum,overlap)
def weighted_choice(self, choices):
    """Pick one item from (item, weight) pairs, proportionally to weight."""
    total = sum(weight for _, weight in choices)
    threshold = uniform(0, total)
    running = 0
    for item, weight in choices:
        running += weight
        if running >= threshold:
            return item
    assert False, "Error"
def population_generate_random(self,allowedlist,P,size,income,age):
    """Build P distinct chromosomes of `size` whitelisted sites each,
    sampling sites uniformly at random; chromosomes are sorted by reach."""
    #P is population of parents
    #size is size of each chromosome
    population=[]
    i=0
    while (i<P):
        chromosome=[]
        while (True):
            gene = choice(Optimize.G.nodes())#random node
            if not self.allowed(allowedlist,gene):
                continue
            # if minimuminc(gene,income)==False or ageconst(gene,age)==False:
            #     continue
            if gene not in chromosome:
                chromosome.append(gene)
            if(len(chromosome)==size):
                break
        chromosome=sorted(chromosome, key= lambda node: Optimize.G.node[node]['reach'])
        ch=tuple(chromosome)
        # Retry on duplicates so the population holds P distinct tuples.
        if ch not in population:
            population.append(ch)
            i=i+1
    for p in population:
        print p
    return population
def population_generate_weighted(self,allowedlist,P,size,income,age):
    """Build P distinct chromosomes, sampling sites with probability
    proportional to their reach; chromosomes are sorted by reach."""
    sortednodes=sorted(Optimize.G.nodes(), key= lambda node: Optimize.G.node[node]['reach'])
    choices=[]
    for n in sortednodes:
        choices.append((n,Optimize.G.node[n]['reach']))
    population=[]
    i=0
    while (i<P):
        chromosome=[]
        while (True):
            gene = self.weighted_choice(choices)#random node
            if not self.allowed(allowedlist,gene):
                continue
            # if self.minimuminc(gene,income)==False or self.ageconst(gene,age)==False:
            #     continue
            # print G.node[gene]['reach']
            if gene not in chromosome:
                chromosome.append(gene)
            if(len(chromosome)==size):
                break
        chromosome=sorted(chromosome, key= lambda node: Optimize.G.node[node]['reach'])
        ch=tuple(chromosome)
        # ch.sort()
        # Retry on duplicates so the population holds P distinct tuples.
        if ch not in population:
            population.append(ch)
            i=i+1
    for p in population:
        print p
    return population
def replace(self,l, X, Y):
for i,v in enumerate(l):
if v == X:
l.pop(i)
l.insert(i, Y)
def pickparents(self, population):
    """Draw two parent chromosomes, fitness-proportionally, with replacement."""
    ranked = sorted(population, key=lambda ch: self.fitness(ch)[0])
    choices = [(ch, self.fitness(ch)[0]) for ch in ranked]
    parents = []
    while len(parents) < 2:
        parents.append(self.weighted_choice(choices))
    return parents
def makechild(self,allowedlist,population, parents,income,age,mut):
    """Crossover: build one child gene-by-gene from the two parents
    (parent picked fitness-proportionally per gene), mutating each gene
    with probability mut% (mut in {1,3,5}). The child replaces the least
    fit of {parent0, parent1, child} in the population, unless it equals
    both parents."""
    choices=[]
    child=[]
    size=len(parents[0])
    sortedparents=sorted(parents, key= lambda ch: self.fitness(ch)[0])
    for ch in sortedparents:
        choices.append((ch,self.fitness(ch)[0]))
    i=0
    while i<size:
        p=self.weighted_choice(choices)
        g=choice(p)
        r=randint(1,100)
        # mut selects the per-gene mutation rate: 5%, 3% or 1%.
        if mut==5:
            if r==1 or r==2 or r==3 or r==4 or r==5:
                g=choice(Optimize.G.nodes())
                if not self.allowed(allowedlist,g):
                    continue
                # if minimuminc(g,income)==False or ageconst(g,age)==False:
                #     continue
                print "Mutation"
        if mut==3:
            if r==1 or r==2 or r==3:
                g=choice(Optimize.G.nodes())
                if not self.allowed(allowedlist,g):
                    continue
                # if minimuminc(g,income)==False or ageconst(g,age)==False:
                #     continue
                print "Mutation"
        if mut==1:
            if r==1:
                g=choice(Optimize.G.nodes())
                if not self.allowed(allowedlist,g):
                    continue
                # if minimuminc(g,income)==False or ageconst(g,age)==False:
                #     continue
                print "Mutation"
        # Only distinct genes are kept, so the child has `size` unique sites.
        if g not in child:
            child.append(g)
            i=i+1
    child=tuple(child)
    FP0=self.fitness(parents[0])
    FP1=self.fitness(parents[1])
    FC=self.fitness(child)
    if child==parents[0] and child==parents[1]:
        return
    print parents[0] , " self.fitness: ", FP0[0], " Overlap: ", FP0[1]
    print parents[1] , " self.fitness: ", FP1[0], " Overlap: ", FP1[1]
    print child, " self.fitness: ", FC[0], " Overlap: ", FC[1]
    if min(FP0[0],FP1[0],FC[0])==FP0[0]:
        print "replaced: " ,parents[0]
        self.replace(population,parents[0],child)
    elif min(FP0[0],FP1[0],FC[0])==FP1[0]:
        print "replaced: " ,parents[1]
        self.replace(population,parents[1],child)
    else:
        print "No replacement"
def minimuminc(self, site, inc):
    """True when `site`'s audience meets the minimum income band `inc`.

    inc is 0 (no restriction), 30, 60 or 100; ilist holds audience
    counts for the bands (0-30)(30-60)(60-100)(100+). Returns None for
    any other inc value, matching the original contract.
    """
    ilist = Optimize.G.node[site]['ilist']
    if inc == 0:
        return True
    if inc == 30:
        return sum(ilist[1:]) >= 300
    if inc == 60:
        return sum(ilist[2:]) >= 200
    if inc == 100:
        # Fix: ilist[3] is a single count; the original called sum() on
        # that scalar, which raised TypeError whenever inc == 100.
        return ilist[3] >= 100
def ageconst(self, site, age):
    """True when at least 100 audience members fall in age band `age`.

    age indexes the bands 1)18-24 2)25-34 3)35-44 4)45-54 5)55-64 6)65+;
    0 disables the check.
    """
    if age == 0:
        return True
    return Optimize.G.node[site]['alist'][age] >= 100
def __init__(self, psize, csize,inc,age,mut,probselect,iteration,budget):
    """Store the GA settings, load the pickled site graph, prune
    unusable nodes, and precompute all-pairs shortest distances on
    -log10(edge weight) via Floyd-Warshall (stored in Optimize.F)."""
    self.budget=budget
    self.psize=psize
    self.csize=csize
    self.inc=inc
    self.age=age
    self.mut=mut
    self.probselect=probselect
    self.iteration=iteration
    Optimize.G = pickle.load(open('saved/graph300.txt'))
    self.prune(Optimize.G)
    self.remove_isolated(Optimize.G)
    # neglog turns multiplicative overlap weights into additive path costs.
    for u,v,attr in Optimize.G.edges(data=True):
        Optimize.G.edge[u][v]['neglog']= -1*math.log10(Optimize.G.edge[u][v]['weight'])
    Optimize.F=nx.floyd_warshall(Optimize.G, weight='neglog')
def calculate(self):
    """Run the genetic algorithm for `iteration` generations and return
    a table of [chromosome, fitness, overlap] rows (header first) for
    the fittest chromosome of each generation."""
    # d = json_graph.node_link_data(G)
    # json.dump(d, open('force/force.json','w'))
    # http_server.load_url('force/force.html')
    allowedlist=[]
    self.makeallowed(allowedlist)
    self.filterallowed(allowedlist,self.inc,self.age)
    pop=[]
    # probselect chooses the initial-population strategy.
    if self.probselect == 0:
        print '\n\nRandom\n\n'
        pop=self.population_generate_random(allowedlist,self.psize,self.csize,self.inc,self.age)
    if self.probselect == 1:
        print '\n\nWeighted\n\n'
        pop = self.population_generate_weighted(allowedlist, self.psize,self.csize,self.inc,self.age)
    self.fitnesscurve=[]
    data=[]
    data.append(['Chromosome', 'self.fitness','Overlap'])
    for i in range(0,self.iteration): #ITERATIONS
        print "\n\n", i+1, "\n\n"
        par= self.pickparents(pop)
        self.makechild(allowedlist,pop,par,self.inc,self.age,self.mut)
        sortedpop=sorted(pop, key= lambda ch: self.fitness(ch)[0], reverse=True)
        print "fittest: "
        F=self.fitness(sortedpop[0])
        print sortedpop[0], "self.fitness: ", F[0], "Overlap ", F[1]
        data.append([sortedpop[0], F[0], F[1]])
    return data
| StarcoderdataPython |
1701172 | '''
author: <NAME> || @slothfulwave612
Python module for i/o operations on the dataset.
'''
## import necessary packages/modules
import os
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
import json
import math
import multiprocessing
from tqdm.auto import tqdm, trange
import statsmodels.api as sm
def get_competition(path):
    '''
    Function for getting data about all the competitions.

    Argument:
    path -- str, path to competition.json file.

    Returns:
    comp_df -- pandas dataframe, all competition data.
    '''
    ## load the json file; a context manager guarantees the file handle
    ## is closed even if parsing fails (the original leaked it)
    with open(path) as f:
        comp_data = json.load(f)
    ## make pandas dataframe
    comp_df = pd.DataFrame(comp_data)
    return comp_df
def flatten_json(sub_str):
    '''
    Flatten a (possibly nested) json sub-structure into a one-level dict.

    Nested dict keys are joined with '_'; list items contribute their
    index as a key component (same scheme as the original stackoverflow
    snippet this was based on).

    Argument:
    sub_str -- substructure defined in the json file.

    Returns:
    dict with the flattened-out information.
    '''
    flat = {}

    def _walk(node, prefix=''):
        ## type() (not isinstance) on purpose: only plain dicts/lists recurse
        if type(node) is dict:
            for key in node:
                _walk(node[key], prefix + key + '_')
        elif type(node) is list:
            for idx, element in enumerate(node):
                _walk(element, prefix + str(idx) + '_')
        else:
            ## leaf: drop the trailing '_' from the accumulated prefix
            flat[prefix[:-1]] = node

    _walk(sub_str)
    return flat
def get_matches(comp_id, season_id, path):
    '''
    Function for getting match-data for a given competition.

    Arguments:
    comp_id -- int, the competition id (kept for interface compatibility;
               the data is read straight from *path*).
    season_id -- int, the season id (likewise unused here).
    path -- str, path to .json file containing match data.

    Returns:
    match_df -- pandas dataframe, containing all the matches
    '''
    ## load the data; the context manager closes the handle even on a
    ## parse error (the original leaked the file object)
    with open(path, encoding='utf8') as f:
        match_data = json.load(f)
    ## flattening the json file
    match_flatten = [flatten_json(x) for x in match_data]
    ## creating a dataframe
    match_df = pd.DataFrame(match_flatten)
    match_df_cols = list(match_df.columns)
    ## renaming the dataframe: flatten_json produces names such as
    ## "away_team_away_team_id"; strip the duplicated prefixes
    for i in range(len(match_df_cols)):
        if match_df_cols[i].count('away_team') == 2:
            ## for away_team columns
            match_df_cols[i] = match_df_cols[i][len('away_team_'):]
        elif match_df_cols[i].count('_0') == 1:
            ## for _0 columns (single-element lists flattened with index 0)
            match_df_cols[i] = match_df_cols[i].replace('_0', '')
        elif match_df_cols[i].count('competition') == 2:
            ## for competition columns
            match_df_cols[i] = match_df_cols[i][len('competition_'):]
        elif match_df_cols[i].count('home_team') == 2:
            ## for home_team columns
            match_df_cols[i] = match_df_cols[i][len('home_team_'):]
        elif match_df_cols[i].count('season') == 2:
            ## for season columns
            match_df_cols[i] = match_df_cols[i][len('season_'):]
    match_df.columns = match_df_cols
    return match_df
def make_event_df(match_id, path):
    '''
    Function for making event dataframe.

    Argument:
    match_id -- int, the required match id (kept for interface
                compatibility; the data is read straight from *path*).
    path -- str, path to .json file containing event data.

    Returns:
    df -- pandas dataframe, the event dataframe for the particular match.
    '''
    ## read in the json file; close the handle deterministically
    ## (the original leaked the file object)
    with open(path, encoding='utf-8') as f:
        event_json = json.load(f)
    ## normalize the json data; pd.json_normalize is the supported
    ## replacement for the deprecated pandas.io.json.json_normalize
    df = pd.json_normalize(event_json, sep='_')
    return df
def full_season_events(match_df, match_ids, path, comp_name=None, leave=True, shot="basic"):
    '''
    Function to make event dataframe for a full season.

    Arguments:
    match_df -- pandas dataframe, containing match-data (unused here;
                kept for interface compatibility).
    match_ids -- list, list of match ids.
    path -- str, path to directory where .json files are listed.
            e.g. '../input/Statsbomb/data/events'
    comp_name -- str, competition name + season name, default: None.
    leave -- keeps all traces of the progressbar upon termination of iteration.
    shot -- str, "basic" / "intermediate" / "advance" dataset flavour.

    Returns:
    event_df -- pandas dataframe, containing event data for the whole season.
                NOTE(review): implicitly returns None for any other *shot*
                value -- confirm callers never pass one.
    '''
    ## use "is None" (identity) rather than "== None"
    if comp_name is None:
        t = match_ids
    else:
        t = tqdm(match_ids, desc=f'Grabbing data for {comp_name}', position=0, leave=leave)
    ## collect per-match frames and concatenate once at the end; the
    ## original concatenated inside the loop, which is quadratic
    frames = []
    for match_id in t:
        ## per-match .json event file
        temp_path = path + f'/{match_id}.json'
        frames.append(make_event_df(match_id, temp_path))
    event_df = pd.concat(frames, sort=False) if frames else pd.DataFrame()
    if shot == "basic":
        return event_df.loc[event_df['type_name'] == 'Shot']
    elif shot == "intermediate":
        return intermediate_dataset(event_df)
    elif shot == "advance":
        return intermediate_dataset(event_df, adv=True)
def multiple_season_event_df(comp_name, comp_id, season_ids, path_match, path_season, shot):
    '''
    Function for making event dataframe having multiple seasons
    for the same competition.

    Arguments:
    comp_name -- str, competition name + season
    comp_id -- int, competition id.
    season_ids -- list, list containing season ids.
    path_match -- str, path to .json file containing match data.
    path_season -- str, path to directory where .json file is listed.
                   e.g. '../input/Statsbomb/data/events'
    shot -- str, dataset flavour forwarded to full_season_events.

    Returns:
    event_df -- pandas dataframe, containing event of multiple seasons.
    '''
    ## collect one frame per season and concatenate once at the end
    ## (the original concatenated inside the loop, which is quadratic)
    frames = []
    for season_id in tqdm(season_ids, desc=f'Grabbing data for {comp_name}', leave=True):
        ## add season id to path-match
        team_path_match = path_match + f'/{comp_id}/{season_id}.json'
        ## make a match dataframe for a particular season
        match_df = get_matches(comp_id, season_id, team_path_match)
        ## list all the match ids
        match_ids = list(match_df['match_id'].unique())
        comp_name_ = match_df['competition_name'].unique()[0] + '-' + match_df['season_name'].unique()[0]
        ## create the event dataframe for the whole season
        temp_df = full_season_events(match_df, match_ids, path_season, comp_name=comp_name_, leave=False, shot=shot)
        ## add competition
        temp_df["comp_name"] = comp_name_
        frames.append(temp_df)
    ## make final dataframe
    event_df = pd.concat(frames, sort=False) if frames else pd.DataFrame()
    event_df = event_df.reset_index(drop=True)
    return event_df
def goal(value):
    '''
    Map a shot-outcome-name to a binary target.

    Arguments:
    value -- str, shot-outcome-name.

    Returns:
    1 if the shot was a goal, 0 otherwise.
    '''
    return 1 if value == 'Goal' else 0
def body_part(value):
    '''
    Collapse footedness: both "Left Foot" and "Right Foot" map to "Foot";
    every other body part (e.g. "Head", "Other") is returned unchanged.
    '''
    return "Foot" if value in ("Left Foot", "Right Foot") else value
def change_dims(old_value, old_min, old_max, new_min, new_max):
    '''
    Linearly rescale a coordinate from one pitch dimension to another.

    Arguments:
    old_value, old_min, old_max, new_min, new_max -- float values.

    Returns:
    new_value -- float value (the rescaled coordinate, x or y).
    '''
    ## fraction of the way along the old range ...
    ratio = (old_value - old_min) / (old_max - old_min)
    ## ... mapped onto the new range
    return ratio * (new_max - new_min) + new_min
def coordinates_x(value):
    '''
    Return the x coordinate of *value* (a Statsbomb [x, y] location),
    rescaled from the 120-long Statsbomb pitch to a 104-long pitch.
    '''
    return change_dims(value[0], 0, 120, 0, 104)
def coordinates_y(value):
    '''
    Return the y coordinate of *value* (a Statsbomb [x, y] location),
    mirrored (80 - y) and rescaled from the 80-wide Statsbomb pitch to
    a 68-wide pitch.
    '''
    value_y = change_dims(80- value[1], 0, 80, 0, 68)

    return value_y
def distance_bw_coordinates(x1, y1, x2=104.0, y2=34.0):
    '''
    Euclidean distance between the shot location and the goal post.

    Arguments:
    x1, y1 -- float, the x and y coordinate for shot location.
    x2, y2 -- float, the x and y coordinate for the goal post location
              (defaults are the goal centre for the rescaled pitch).

    Returns:
    distance -- float, the euclidean distance.
    '''
    ## math.hypot is the standard-library euclidean distance and avoids
    ## the hand-rolled sqrt-of-summed-squares
    return math.hypot(x2 - x1, y2 - y1)
def post_angle(x, y, g1_x=104, g1_y=30.34, g2_x=104, g2_y=37.66):
    '''
    Angle (in degrees) subtended at (x, y) by the two goal posts.

    Arguments:
    x -- float, x coordinate from where the shot was taken.
    y -- float, y coordinate from where the shot was taken.
    g1_* / g2_* -- goal post coordinates; defaults are the post positions
                   for the rescaled Statsbomb pitch (assumes g1_y < g2_y).

    Returns:
    angle -- float, the angle in degrees (180 on the goal line between
             the posts, 0 on the goal line outside them).
    '''
    ## goal-line special cases.  The original hard-coded the default post
    ## coordinates here, which silently broke any call with custom posts;
    ## use the parameters instead (identical behaviour for the defaults).
    if x == g1_x == g2_x:
        if g1_y <= y <= g2_y:
            return 180
        return 0
    ## the three sides of the triangle (shot, post1, post2)
    A_dis = math.hypot(g1_x - x, g1_y - y)
    B_dis = math.hypot(g2_x - x, g2_y - y)
    C_dis = math.hypot(g2_x - g1_x, g2_y - g1_y)
    ## using cosine law
    value = ((A_dis**2) + (B_dis**2) - (C_dis**2)) / (2 * A_dis * B_dis)
    angle = np.degrees(np.arccos(value))
    return angle
def create_result_df(df, length, col):
    '''
    Function to create a result dataframe (statsbomb_xg vs predicted_xg).

    Arguments:
    df -- pandas dataframe of shot-level rows (one row per shot).
    length -- int, number of top goal-scorers to include.
    col -- str, column name holding the predicted xG value.

    Returns:
    result -- pandas dataframe with player_name, shots, goals,
              statsbomb_xg and predicted_xg columns, sorted by goals.
    '''
    ## fetch the *length* players with the most goals (rows with target==1)
    players = df.loc[df['target'] == 1, 'player_name'].value_counts()[:length].index
    ## init a dictionary
    result_dict = {
        'player_name': [],
        'shots': [],
        'goals': [],
        'statsbomb_xg': [],
        'predicted_xg': []
    }
    ## calculate required values
    for player in players:
        ## total number of shots taken by a player
        ## (assumes every row of df is a shot -- TODO confirm)
        shots = len(df.loc[(df['player_name'] == player)])
        ## total number of goals scored by a player
        goals = len(df.loc[
            (df['player_name'] == player) &
            (df['target'] == 1)
        ])
        ## aggregated statsbomb-xG-value for a player
        stats_xg = df.loc[
            (df['player_name'] == player),
            'shot_statsbomb_xg'
        ].sum()
        ## aggregated predicted-xG-value for a player
        pred_xg = df.loc[
            (df['player_name'] == player),
            col
        ].sum()
        ## append result to result_dict
        result_dict['player_name'].append(player)
        result_dict['shots'].append(shots)
        result_dict['goals'].append(goals)
        result_dict['statsbomb_xg'].append(stats_xg)
        result_dict['predicted_xg'].append(pred_xg)
    ## create pandas dataframe, most goals first
    result = pd.DataFrame(result_dict).sort_values(by='goals', ascending=False).reset_index(drop=True)
    return result
def get_indices(width, height, xpartition, ypartition, xinput, yinput):
    """
    Map a pitch coordinate to its (row, column) cell in a grid.

    Args:
        width (float): width of the pitch.
        height (float): height of the pitch.
        xpartition (int): number of grid cells along x (columns).
        ypartition (int): number of grid cells along y (rows).
        xinput (float): x-coordinate location.
        yinput (float): y-coordinate location.

    Returns:
        tuple: (row, column) indices for the grid.
    """
    ## size of a single cell in each direction
    x_step = width / xpartition
    y_step = height / ypartition
    ## 1-based cell numbers; coordinates at (or below) the 0-border are
    ## nudged to 0.5 so they land inside the first cell
    x_cell = math.ceil((xinput if xinput > 0 else 0.5) / x_step)
    y_cell = math.ceil((yinput if yinput > 0 else 0.5) / y_step)
    ## flip y so row 0 is the top row; make x 0-based
    return (ypartition - y_cell, x_cell - 1)
def get_stats(x_val, y_val):
    """
    Fit a logistic-regression model using the statsmodels api.

    Args:
        x_val (pandas.DataFrame): containing features.
        y_val (numpy.ndarray): containing targets.

    Returns:
        the fitted statsmodels results object (call .summary() on it for
        the usual report; the original docstring wrongly said a Summary
        is returned).
    """
    ## train logistic model; maxiter raised so the optimizer has room to
    ## converge on larger feature sets
    log_reg = sm.Logit(y_val, x_val).fit(maxiter=1000)

    return log_reg
def make_df(df, cols, rows=25):
    """
    Build a per-player goals table from shot-level data.

    Args:
        df (pandas.DataFrame): shot-level data.
        cols (list): the required columns (must include "player_name"
                     and "target").
        rows (int, optional): number of top rows to keep. Defaults to 25.

    Returns:
        pandas.DataFrame: first *rows* players, ordered by goals scored.
    """
    ## sum the selected columns per player, order by goals (the summed
    ## "target" column), and rename it to something meaningful
    per_player = (
        df[cols]
        .groupby(by="player_name")
        .sum()
        .reset_index()
        .sort_values("target", ascending=False)
        .reset_index(drop=True)
        .rename({"target": "goals_scored"}, axis=1)
    )
    ## fetch first few rows
    return per_player.head(rows)
def area(x1, y1, x2, y2, x3, y3):
    """
    Area of the triangle with vertices (x1,y1), (x2,y2), (x3,y3).

    Args:
        float: coordinates for the triangle vertices.

    Returns:
        float: area of the triangle (always non-negative).
    """
    ## shoelace formula; abs() discards the orientation sign
    signed_twice = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)
    return abs(signed_twice / 2.0)
def is_inside(player_coord_x, player_coord_y, shot_location_x, shot_location_y, pole_1_x=104.0, pole_1_y=30.34, pole_2_x=104.0, pole_2_y=37.66):
    """
    Whether a player stands inside the triangle spanned by the shot
    location and the two goal posts (i.e. between the shooter and goal).

    Args:
        player_coord_x (float): player-coordinate-x.
        player_coord_y (float): player-coordinate-y.
        shot_location_x (float): shot-coordinate-x.
        shot_location_y (float): shot-coordinate-y.
        pole_1_x/pole_1_y, pole_2_x/pole_2_y (float, optional): goal post
            coordinates; defaults are the rescaled-pitch post positions.

    Returns:
        bool: True if present else False.
    """
    ## area of the full triangle (shot location + both posts)
    whole = area(shot_location_x, shot_location_y, pole_1_x, pole_1_y, pole_2_x, pole_2_y)
    ## areas of the three sub-triangles that replace one vertex with the
    ## player's position
    part_bc = area(player_coord_x, player_coord_y, pole_1_x, pole_1_y, pole_2_x, pole_2_y)
    part_ac = area(player_coord_x, player_coord_y, shot_location_x, shot_location_y, pole_2_x, pole_2_y)
    part_ab = area(player_coord_x, player_coord_y, shot_location_x, shot_location_y, pole_1_x, pole_1_y)
    ## inside exactly when the sub-triangles tile the whole one
    ## (rounded to 2 decimals to absorb floating-point error)
    return round(whole, 2) == round(part_bc + part_ac + part_ab, 2)
def freeze_frame_vars(freeze_frame, shot_location_x, shot_location_y):
    """
    Function for making freeze frame variables.

    Args:
        freeze_frame (list): tracking information; each entry is a dict
            with "location", "position" and "teammate" keys.
        shot_location_x (float): shot coordinate location x.
        shot_location_y (float): shot coordinate location y.

    Returns:
        float values: 1. number of teammates between goal and shot-location.
                      2. number of opponents (excluding goalkeeper) between goal and shot-location.
                      3. goalkeeper covering angle.
                      4. distance between goalkeeper and the goal.
                      5. distance between goalkeeper and the shot-location.

    NOTE(review): any frame whose position is "Goalkeeper" (own or
    opposing) takes the else branch, so the last goalkeeper seen wins and
    keeper frames are excluded from both counts -- confirm intended.
    """
    ## init the five result variables to 0
    count_teammate, count_opponent, goal_keeper_angle, dis_goal_keeper, dis_shot_keeper = 0, 0, 0, 0, 0
    ## traverse the freeze frame
    for frame in freeze_frame:
        ## fetch coordinate location of the player, rescaled to our pitch
        x_coord = coordinates_x(frame["location"])
        y_coord = coordinates_y(frame["location"])
        ## fetch player's position
        position = frame["position"]["name"]
        if position != "Goalkeeper":
            ## count players standing between the shooter and the goal
            if frame["teammate"] == True and is_inside(x_coord, y_coord, shot_location_x, shot_location_y):
                count_teammate += 1
            elif frame["teammate"] == False and is_inside(x_coord, y_coord, shot_location_x, shot_location_y):
                count_opponent += 1
        else:
            ## compute goalkeeper covering angle
            goal_keeper_angle = post_angle(x_coord, y_coord)
            ## compute distance between goalkeeper and goal
            dis_goal_keeper = distance_bw_coordinates(x_coord, y_coord)
            ## compute distance between goalkeeper and shot-location
            dis_shot_keeper = distance_bw_coordinates(x_coord, y_coord, shot_location_x, shot_location_y)
    return count_teammate, count_opponent, goal_keeper_angle, dis_goal_keeper, dis_shot_keeper
def simple_dataset(comp_name, comp_id, season_ids, path_season, path_match, path_save, filename):
    '''
    Function to make a dataset for our simple-xG-model.

    The dataset will have:
    1. x and y location,
    2. Statsbomb-xG,
    3. Player Name,
    4. Shot Type Name,
    5. Body Part
    6. Goal or No-Goal.

    Arguments:
    comp_name -- str, competition name + season.
    comp_id -- int, competition id.
    season_ids -- list, season ids to include.
    path_season -- str, path to the directory where event files are saved.
    path_match -- str, path to the directory where match data file is stored for each competitions.
    path_save -- str, path to the directory where the shot dataframe will be saved.
    filename -- str, output filename.
                NOTE(review): this parameter is never used below -- see
                the note at the final to_pickle call.
    '''
    ## get event-dataframe (shot rows only, "basic" flavour)
    event_df = multiple_season_event_df(comp_name, comp_id, season_ids, path_match, path_season, shot="basic")
    ## col-list
    col_list = ['location', 'shot_statsbomb_xg', 'player_name', "comp_name", 'shot_outcome_name', 'shot_body_part_name', 'shot_type_name']
    ## shot-dataframe from event-dataframe
    shot_df = event_df.loc[:, col_list]
    ## create body part column
    shot_df['body_part'] = shot_df['shot_body_part_name'].apply(body_part)
    ## create target column - 2 classes - goal and no goal
    shot_df['target'] = shot_df['shot_outcome_name'].apply(goal)
    ## drop shot_outcome_name and shot_body_part_name column
    shot_df.drop(['shot_outcome_name', 'shot_body_part_name'], axis=1, inplace=True)
    ## filter out shots from penalties, corners and Kick Off
    shot_df = shot_df.loc[
        (shot_df["shot_type_name"] != "Penalty") &
        (shot_df["shot_type_name"] != "Corner") &
        (shot_df["shot_type_name"] != "Kick Off")
    ]
    ## add x and y coordinate columns (rescaled pitch coordinates)
    shot_df['x'] = shot_df['location'].apply(coordinates_x)
    shot_df['y'] = shot_df['location'].apply(coordinates_y)
    ## drop location column
    shot_df.drop('location', inplace=True, axis=1)
    ## save the dataset
    ## NOTE(review): the literal "(unknown)" below looks like a garbled /
    ## placeholder filename -- presumably this should use *filename*;
    ## confirm the intended output name before changing.
    shot_df.to_pickle(f'{path_save}/(unknown)')
def intermediate_dataset(df, adv=False):
    """
    Function for making dataframe for intermediate model (containing shots info).

    Args:
        df (pandas.DataFrame): event dataframe (shots plus their key passes).
        adv (bool, optional): also compute the advanced freeze-frame
            features (players in between, goalkeeper angle).

    Returns:
        pandas.DataFrame: one row per open-play shot with location, shot
        context, assist type and the binary goal target.
    """
    ## init an empty dictionary (advanced flavour carries two extra columns)
    if adv == True:
        main_dict = {
            'x' : [], 'y': [],
            "shot_type_name": [], "shot_body_part_name": [],
            "player_name": [], "shot_statsbomb_xg": [],
            "pass_type": [], "open_goal": [],
            "under_pressure": [], "deflected": [], "player_in_between": [],
            "goal_keeper_angle": [], "target": []
        }
    else:
        main_dict = {
            'x' : [], 'y': [],
            "shot_type_name": [], "shot_body_part_name": [],
            "player_name": [], "shot_statsbomb_xg": [],
            "pass_type": [], "open_goal": [],
            "under_pressure": [], "deflected": [], "target": []
        }
    ## fetch shots from the dataframe
    shot_df = df.loc[
        df["type_name"] == "Shot"
    ].copy()
    ## fetch key-pass and assists from the dataframe; some seasons lack
    ## the pass_goal_assist column, hence the KeyError fallback
    try:
        pass_df = df.loc[
            (df["pass_shot_assist"] == True) |
            (df["pass_goal_assist"] == True)
        ].copy().set_index("id")
    except KeyError:
        pass_df = df.loc[
            (df["pass_shot_assist"] == True)
        ].copy().set_index("id")
    for _, data in shot_df.iterrows():
        ## ignore shots from penalties, corners and Kick Off
        if (data["shot_type_name"] == "Penalty") or\
            (data["shot_type_name"] == "Corner") or\
            (data["shot_type_name"] == "Kick Off"):
            continue
        ## fetch shot location
        location = data["location"]
        ## get x and y coordinates (rescaled pitch)
        x = coordinates_x(location)
        y = coordinates_y(location)
        if adv == True:
            ## fetch freeze frame
            freeze_frame = data["shot_freeze_frame"]
            ## calculate freeze-frame-variables
            count_teammate, count_opponent, goal_keeper_angle, dis_goal_keeper, dis_shot_keeper = freeze_frame_vars(
                freeze_frame, x, y
            )
            ## append info to main-dict for advanced features
            main_dict["player_in_between"].append(count_teammate + count_opponent)
            main_dict["goal_keeper_angle"].append(goal_keeper_angle)
        ## fetch shot_type_name
        shot_type_name = data["shot_type_name"]
        ## binary target from shot_outcome_name
        if data["shot_outcome_name"] == "Goal":
            target = 1
        else:
            target = 0
        ## fetch shot_body_part_name; collapse footedness to "Foot"
        if data["shot_body_part_name"] == "Right Foot":
            body_part = "Foot"
        elif data["shot_body_part_name"] == "Left Foot":
            body_part = "Foot"
        else:
            body_part = data["shot_body_part_name"]
        ## fetch player name
        player_name = data["player_name"]
        ## fetch statsbomb xG
        stats_xg = data["shot_statsbomb_xg"]
        try:
            ## fetch open_goal (column may be absent entirely)
            if pd.isna(data["shot_open_goal"]):
                open_goal = 0
            else:
                open_goal = 1
        except Exception:
            open_goal = 0
        ## fetch under-pressure flag.  BUGFIX: the original had no else
        ## branch, so an explicit False left `pressure` unbound on the
        ## first row (NameError) or stale from the previous iteration.
        if pd.isna(data["under_pressure"]):
            pressure = 0
        elif data["under_pressure"] == True:
            pressure = 1
        else:
            pressure = 0
        ## fetch deflected flag; same missing-else fix as above
        try:
            if pd.isna(data["shot_deflected"]):
                deflected = 0
            elif data["shot_deflected"] == True:
                deflected = 1
            else:
                deflected = 0
        except Exception:
            deflected = 0
        ## is-assisted by a pass or not
        if pd.isna(data["shot_key_pass_id"]):
            pass_type = "Not Assisted"
        else:
            ## fetch key pass id
            key_pass_id = data["shot_key_pass_id"]
            ## fetch data-row of the key pass
            temp_data = pass_df.loc[key_pass_id]
            ## init pass_type; the try/excepts below tolerate missing
            ## columns, and later categories overwrite earlier ones
            pass_type = ""
            ## fetch through balls
            try:
                if temp_data["pass_technique_name"] == "Through Ball":
                    pass_type = "Through Ball"
            except Exception:
                pass
            ## fetch cutbacks
            try:
                if temp_data["pass_cut_back"] == True:
                    pass_type = "Cut Back"
            except Exception:
                pass
            ## fetch cross
            try:
                if temp_data["pass_cross"] == True:
                    pass_type = "Cross"
            except Exception:
                pass
            if pass_type == "":
                # fall back to the pass_type_name category
                if temp_data["pass_type_name"] == "Corner":
                    pass_type = "From Corner"
                elif temp_data["pass_type_name"] == "Free Kick":
                    pass_type = "From Free Kick"
                else:
                    pass_type = "Other"
        ## append to dict
        main_dict['x'].append(x)
        main_dict['y'].append(y)
        main_dict["shot_type_name"].append(shot_type_name)
        main_dict["shot_body_part_name"].append(body_part)
        main_dict["player_name"].append(player_name)
        main_dict["shot_statsbomb_xg"].append(stats_xg)
        main_dict["pass_type"].append(pass_type)
        main_dict["open_goal"].append(open_goal)
        main_dict["under_pressure"].append(pressure)
        main_dict["deflected"].append(deflected)
        main_dict["target"].append(target)
    return pd.DataFrame(main_dict)
def make_train_test(path, path_save):
    '''
    Function for making and saving train and test data.

    Men's competitions (plus NWSL) form the training split; the women's
    FAWSL and World Cup form the test split.

    Argument:
    path -- str, path where the shot data is stored.
    path_save -- str, path where the data will be stored.
    '''
    ## competition files per split; order matters for reproducibility of
    ## the concatenation (same order as the original implementation)
    train_files = [
        'La_Liga_shots.pkl',
        'Champions_League_shots.pkl',
        'FIFA_World_Cup_shots.pkl',
        'Premier_League_shots.pkl',
        'NWSL_shots.pkl',
    ]
    test_files = [
        "FA_Women's_Super_League_shots.pkl",
        "Women's_World_Cup_shots.pkl",
    ]
    ## load and concatenate each split (replaces seven copy-pasted
    ## read_pickle lines)
    train_df = pd.concat([pd.read_pickle(f'{path}/{name}') for name in train_files])
    test_df = pd.concat([pd.read_pickle(f'{path}/{name}') for name in test_files])
    ## randomly shuffle both the datasets
    train_df = train_df.sample(frac=1).reset_index(drop=True)
    test_df = test_df.sample(frac=1).reset_index(drop=True)
    ## ensure the output directory exists; makedirs also creates missing
    ## parents, where the original os.mkdir would raise
    os.makedirs(path_save, exist_ok=True)
    ## save train dataframe
    train_df.to_pickle(path_save + '/train_df.pkl')
    ## save test dataframe
    test_df.to_pickle(path_save + '/test_df.pkl')
1797555 | import ckit
from ckit.ckit_const import *
## @addtogroup widget
## @{
#--------------------------------------------------------------------
## Tab bar widget
#
class TabBarWidget(ckit.Widget):
    """Tab bar widget: draws a horizontally scrollable row of named tabs
    and reports left-click tab selection through a callback.

    Tab items are sequences whose first element is the display name.
    """

    # widest a single tab may be, in character cells (longer names are
    # shown with a trailing ellipsis)
    MAX_ITEM_WIDTH = 30

    def __init__( self, window, x, y, width, height, selchange_handler ):
        """Create the tab bar at the given character-cell rectangle.

        selchange_handler(index, item) is invoked on tab selection.
        """
        ckit.Widget.__init__( self, window, x, y, width, height )
        self.plane0 = None
        self.createThemePlane()
        self.items = []
        self.selection = None
        # horizontal scroll offset in character cells
        self.scroll_pos = 0
        self.selchange_handler = selchange_handler
        self.paint()

    def destroy(self):
        """Release the theme plane resources."""
        self.destroyThemePlane()

    def show(self,visible):
        """Show/hide the widget together with its background plane."""
        ckit.Widget.show(self,visible)
        self.plane0.show(visible)

    def charToTabIndex( self, char_x, char_y ):
        """Return the tab index under character cell (char_x, char_y), or None."""
        # walk the tabs left to right, accounting for the scroll offset;
        # each tab occupies its (clamped) name width plus 2 padding cells
        x = -self.scroll_pos
        if 0 <= (char_y - self.y) < self.height:
            for i, item in enumerate(self.items):
                name = item[0]
                item_width = min( self.window.getStringWidth(name), TabBarWidget.MAX_ITEM_WIDTH ) + 2
                if x <= (char_x - self.x) < x + item_width:
                    return i
                x += item_width
        return None

    def onLeftButtonDown( self, char_x, char_y, mod ):
        """Select the clicked tab and notify the selection handler."""
        #print( "onLeftButtonDown", char_x, char_y, mod )
        index = self.charToTabIndex( char_x, char_y )
        if index==None : return
        self.selection = index
        if self.selchange_handler:
            self.selchange_handler( self.selection, self.items[self.selection] )

    def onLeftButtonUp( self, char_x, char_y, mod ):
        """No-op; selection happens on button-down."""
        #print( "onLeftButtonUp", char_x, char_y, mod )
        pass

    def createThemePlane(self):
        """Lazily create the themed background plane."""
        if not self.plane0:
            self.plane0 = ckit.ThemePlane3x3( self.window, 'tabbar0.png' )

    def destroyThemePlane(self):
        """Destroy the themed background plane if it exists."""
        if self.plane0:
            self.plane0.destroy()
            self.plane0 = None

    def setItems( self, items ):
        """Replace the tab items and repaint."""
        self.items = items
        self.paint()

    def setSelection( self, selection ):
        """Set the selected tab index and repaint."""
        self.selection = selection
        self.paint()

    def makeVisible( self, index ):
        """Adjust scroll_pos so the tab at *index* is fully visible."""
        tabs_width = 0
        for i, item in enumerate(self.items):
            name = item[0]
            item_width = min( self.window.getStringWidth(name), TabBarWidget.MAX_ITEM_WIDTH ) + 2
            if i==index:
                # scroll left/right just enough to bring the tab in view
                if self.scroll_pos > tabs_width:
                    self.scroll_pos = tabs_width
                elif self.scroll_pos + self.width < tabs_width + item_width:
                    self.scroll_pos = tabs_width + item_width - self.width
            tabs_width += item_width
            if i==len(self.items)-1:
                # don't leave empty space after the last tab
                if tabs_width < self.scroll_pos + self.width:
                    self.scroll_pos = max( tabs_width - self.width, 0 )

    def paint(self):
        """Redraw the background plane and every tab."""
        if self.selection!=None:
            self.makeVisible(self.selection)
        client_rect = self.window.getClientRect()
        offset_x, offset_y = self.window.charToClient( 0, 0 )
        char_w, char_h = self.window.getCharSize()
        # align the background image with the window edges when the
        # widget touches them
        offset_x2 = 0
        if self.x==0 : offset_x2 = offset_x
        offset_x3 = 0
        if self.x+self.width==self.window.width() : offset_x3 = offset_x
        offset_y2 = 0
        if self.y==0 : offset_y2 = offset_y
        offset_y3 = 0
        if self.y+self.height==self.window.height() : offset_y3 = offset_y
        # background image
        self.plane0.setPosSize( self.x*char_w+offset_x-offset_x2, self.y*char_h+offset_y-offset_y2, self.width*char_w+offset_x2+offset_x3, self.height*char_h+offset_y2+offset_y3 )
        line_color = (120,120,120)
        active_bg_color = (240,240,240)
        inactive_bg_color = None
        fg = ckit.getColor("bar_fg")
        attr = ckit.Attribute( fg=fg )
        # (active?, segment) -> attribute; segments 0/1/2 are the left
        # border, middle and right border of a tab
        attribute_table = {}
        attribute_table[ True, 0 ] = ckit.Attribute( fg=fg, bg=active_bg_color, line0=( LINE_LEFT, line_color ) )
        attribute_table[ True, 1 ] = ckit.Attribute( fg=fg, bg=active_bg_color, line0=( 0, line_color ) )
        attribute_table[ True, 2 ] = ckit.Attribute( fg=fg, bg=active_bg_color, line0=( LINE_RIGHT, line_color ) )
        attribute_table[ False, 0 ] = ckit.Attribute( fg=fg, bg=inactive_bg_color, line0=( LINE_LEFT, line_color ) )
        attribute_table[ False, 1 ] = ckit.Attribute( fg=fg, bg=inactive_bg_color, line0=( 0, line_color ) )
        attribute_table[ False, 2 ] = ckit.Attribute( fg=fg, bg=inactive_bg_color, line0=( LINE_RIGHT, line_color ) )
        # blank out the text row
        self.window.putString( self.x, self.y, self.width, 1, attr, " " * self.width )
        # draw the items (tabs)
        x = self.x
        y = self.y
        width = self.width
        height = self.height
        offset = -self.scroll_pos
        for i, item in enumerate(self.items):
            active = i==self.selection
            name = item[0]
            item_width = self.window.getStringWidth(name)
            if item_width>TabBarWidget.MAX_ITEM_WIDTH:
                # clamp long names with an ellipsis on the right
                name = ckit.adjustStringWidth( self.window, name, TabBarWidget.MAX_ITEM_WIDTH, align=ckit.ALIGN_LEFT, ellipsis=ckit.ELLIPSIS_RIGHT )
                item_width = TabBarWidget.MAX_ITEM_WIDTH
            self.window.putString( x, y, width, height, attribute_table[active,0], " ", offset=offset )
            offset += 1
            self.window.putString( x, y, width-1, height, attribute_table[active,1], name, offset=offset )
            offset += item_width
            if i<len(self.items)-1:
                self.window.putString( x, y, width, height, attribute_table[active,1], " ", offset=offset )
            else:
                self.window.putString( x, y, width, height, attribute_table[active,2], " ", offset=offset )
            offset += 1
## @} widget
| StarcoderdataPython |
3278757 | from os import environ
import boto3
import configparser
# read region/credential fallbacks from config.ini (used when the AWS
# environment variables are not set)
config = configparser.ConfigParser()
config.read('config.ini')
def get_new_boto3_session():
    """Create a fresh boto3 session.

    Environment variables take precedence; the [aws] section of
    config.ini supplies the fallbacks.
    """
    region = environ.get('AWS_REGION') or config['aws']['region_name']
    access_key = environ.get('AWS_ACCESS_KEY_ID') or config['aws']['aws_access_key_id']
    secret_key = environ.get('AWS_SECRET_ACCESS_KEY') or config['aws']['aws_secret_access_key']
    return boto3.session.Session(
        region_name = region,
        aws_access_key_id = access_key,
        aws_secret_access_key = secret_key
    )
| StarcoderdataPython |
3298546 | # Generated by Django 3.1.13 on 2021-10-07 15:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # auto-generated Django migration: adds the boolean
    # `show_annotation_counter_tool` flag (default True) to
    # WorkstationConfig.

    dependencies = [("workstation_configs", "0009_auto_20211005_1329")]

    operations = [
        migrations.AddField(
            model_name="workstationconfig",
            name="show_annotation_counter_tool",
            field=models.BooleanField(default=True),
        )
    ]
| StarcoderdataPython |
4835307 | from django.contrib import admin
# RegisterView your models here.
from openbook_auth.models import User, UserProfile
class UserProfileInline(admin.TabularInline):
    """Edit the related UserProfile inline on the User admin page."""
    model = UserProfile

    def has_delete_permission(self, request, obj=None):
        # profiles may never be deleted from the admin
        return False
class UserAdmin(admin.ModelAdmin):
    """Admin for User: searchable by username, profile edited inline;
    users cannot be created here and the password field is hidden."""
    inlines = [
        UserProfileInline,
    ]
    search_fields = ('username',)
    exclude = ('password',)

    def has_add_permission(self, request, obj=None):
        # users are created through the registration flow, not the admin
        return False
# expose User (with its inline profile) in the Django admin
admin.site.register(User, UserAdmin)
| StarcoderdataPython |
123464 | <filename>examples/tables.py<gh_stars>0
import django_tables2 as tables
from .models import equipment
class equipmentTable(tables.Table):
    """django-tables2 table for `equipment` rows.

    The three template columns render read / edit / delete buttons; the
    front-end JS uses each button's data-id URL to open the matching
    modal. Template strings must stay exactly as-is.
    """
    T_read =('<button type=\"button\" class=\"read-book btn btn-sm btn-primary\" data-id="{% url \'read_book\' record.pk %}\"><span class=\"fa fa-eye\"></span> </button>')
    read = tables.TemplateColumn(T_read); read.verbose_name=''
    T_edit =('<button type=\"button\" class=\"update-book btn btn-sm btn-primary\" data-id="{% url \'update_book\' record.pk %}\"><span class=\"fa fa-pencil\"></span> </button>')
    edit = tables.TemplateColumn(T_edit); edit.verbose_name=''
    T_del =('<button type=\"button\" class=\"delete-book btn btn-sm btn-danger\" data-id="{% url \'delete_book\' record.pk %}\"><span class=\"fa fa-trash\"></span> </button>')
    delele = tables.TemplateColumn(T_del); delele.verbose_name=''
    #delete2 = tables.TemplateColumn('<a href="{% url "delete_division" record.pk %}"> width="25"></a>',verbose_name=u'Delete',)

    class Meta:
        model = equipment
        template_name = "django_tables2/bootstrap.html"
| StarcoderdataPython |
179852 | <gh_stars>10-100
import sys
def hello(who):
    """Print a greeting for *who* on stdout."""
    greeting = 'hello {}'.format(who)
    print(greeting)
# greet the name passed as the first CLI argument (the trailing dataset
# artifact that corrupted this line has been removed)
# NOTE(review): no __main__ guard -- this also runs on import, and raises
# IndexError when no argument is supplied; confirm before hardening.
hello(sys.argv[1])
135120 | <filename>reconcile/test/test_quay_repos.py
from unittest.mock import patch
from reconcile.quay_repos import RepoInfo, act
from reconcile.quay_base import OrgKey
from .fixtures import Fixtures
# shared fixture loader for this test module
fxt = Fixtures("quay_repos")
def build_state(fixture_state):
    """Turn fixture triples (name, public, description) into RepoInfo items,
    all under the same test org key."""
    repos = []
    for item in fixture_state:
        repos.append(
            RepoInfo(
                org_key=OrgKey("instance", "org"),
                name=item[0],
                public=item[1],
                description=item[2],
            )
        )
    return repos
def get_test_repo_from_state(state, name):
    """Return the first repo in *state* whose name matches, else None."""
    return next((repo for repo in state if repo.name == name), None)
class TestQuayRepos:
    # `act` should dispatch exactly one call per repo to the matching
    # action helper. The mock parameters are listed in reverse decorator
    # order (patch decorators apply bottom-up).
    @staticmethod
    @patch("reconcile.quay_repos.act_public")
    @patch("reconcile.quay_repos.act_description")
    @patch("reconcile.quay_repos.act_delete")
    @patch("reconcile.quay_repos.act_create")
    def test_act(act_create, act_delete, act_description, act_public):
        # current/desired repo states come from the state.yml fixture
        fixture = fxt.get_anymarkup("state.yml")
        current_state = build_state(fixture["current_state"])
        desired_state = build_state(fixture["desired_state"])
        quay_api_store = {}
        dry_run = True
        act(dry_run, quay_api_store, current_state, desired_state)
        # repo only in current state -> deleted
        repo_delete = get_test_repo_from_state(current_state, "repo_delete")
        act_delete.assert_called_once_with(dry_run, quay_api_store, repo_delete)
        # repo only in desired state -> created
        repo_create = get_test_repo_from_state(desired_state, "repo_create")
        act_create.assert_called_once_with(dry_run, quay_api_store, repo_create)
        # repo with changed description -> description updated
        repo_desc = get_test_repo_from_state(desired_state, "repo_desc")
        act_description.assert_called_once_with(dry_run, quay_api_store, repo_desc)
        # repo with changed visibility -> visibility updated
        repo_public = get_test_repo_from_state(desired_state, "repo_public")
        act_public.assert_called_once_with(dry_run, quay_api_store, repo_public)
| StarcoderdataPython |
3298308 | from app import app
from flask import render_template, jsonify, request
from loguru import logger
@app.route("/")
@app.route("/index")
def index():
    """Serve the landing page (mounted at both "/" and "/index")."""
    return render_template("index.html")
@app.route("/dummy", methods=['POST'])
def dummy():
    """Echo the posted form field `string` back as JSON: {"dummy": value}."""
    return jsonify({"dummy": request.form.get('string')})
44072 | <filename>tests/aliyun_iot_test.py<gh_stars>1-10
# -*- coding: utf-8-*-
import unittest
import os
os.sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from app.components import logger
from app.components.aliyun_iot import IotServer
class TestComponentsAliyunIOT(unittest.TestCase):
    """
    Tests for the Aliyun IoT server component (IotServer).
    (Original docstring said "spreadsheet calculation" -- a stale
    copy-paste; replaced.)
    """
    def setUp(self):
        # one shared IotServer singleton per test
        self.iot_server = IotServer.get_instance()

    def test_send_device_message(self):
        """
        Send a device message through the IoT server (smoke test; no
        assertions -- passes as long as no exception is raised).
        :return:
        """
        self.iot_server.send_device_message('马云')

    def test_sync_iot_shadow(self):
        # fetch and print the device shadow (smoke test, no assertions)
        r = self.iot_server.get_iot_shadow()
        print(r)
        pass
if __name__ == '__main__':
    # enable info-level logging, then run the test suite
    logger.init(info=True)
    unittest.main()
| StarcoderdataPython |
1658957 | # coding=utf-8
import datetime
import StringIO
import glob
import os
import traceback
import urlparse
from zipfile import ZipFile, ZIP_DEFLATED
from subzero.language import Language
from subzero.lib.io import FileIO
from subzero.constants import PREFIX, PLUGIN_IDENTIFIER
from menu_helpers import SubFolderObjectContainer, debounce, set_refresh_menu_state, ZipObject, ObjectContainer, route
from main import fatality
from support.helpers import timestamp, pad_title
from support.config import config
from support.lib import Plex
from support.storage import reset_storage, log_storage, get_subtitle_storage
from support.scheduler import scheduler
from support.items import set_mods_for_part, get_item_kind_from_rating_key
from support.i18n import _
@route(PREFIX + '/advanced')
def AdvancedMenu(randomize=None, header=None, message=None):
    """Build the 'Advanced' maintenance menu.

    When the advanced menu is PIN-locked and the PIN hasn't been entered yet,
    only the 'Enter PIN' item is shown; otherwise every maintenance action
    (restart, log download, task triggers, storage resets, ...) is listed.
    """
    oc = SubFolderObjectContainer(
        header=header or _("Internal stuff, pay attention!"),
        message=message,
        no_cache=True,
        no_history=True,
        replace_parent=False,
        title2=_("Advanced"))
    if config.lock_advanced_menu and not config.pin_correct:
        oc.add(DirectoryObject(
            key=Callback(
                PinMenu,
                randomize=timestamp(),
                # NOTE(review): PinMenu compares success_go_to against the
                # literal "advanced"; wrapping the token in _() would break
                # that comparison in translated locales -- confirm intent.
                success_go_to=_("advanced")),
            title=pad_title(_("Enter PIN")),
            summary=_("The owner has restricted the access to this menu. Please enter the correct pin"),
        ))
        return oc
    oc.add(DirectoryObject(
        key=Callback(TriggerRestart, randomize=timestamp()),
        title=pad_title(_("Restart the plugin")),
    ))
    oc.add(DirectoryObject(
        key=Callback(GetLogsLink),
        title=_("Get my logs (copy the appearing link and open it in your browser, please)"),
        summary=_("Copy the appearing link and open it in your browser, please"),
    ))
    oc.add(DirectoryObject(
        key=Callback(TriggerBetterSubtitles, randomize=timestamp()),
        title=pad_title(_("Trigger find better subtitles")),
    ))
    oc.add(DirectoryObject(
        key=Callback(SkipFindBetterSubtitles, randomize=timestamp()),
        title=pad_title(_("Skip next find better subtitles (sets last run to now)")),
    ))
    oc.add(DirectoryObject(
        key=Callback(SkipRecentlyAddedMissing, randomize=timestamp()),
        title=pad_title(_("Skip next find recently added with missing subtitles (sets last run to now)")),
    ))
    oc.add(DirectoryObject(
        key=Callback(TriggerStorageMaintenance, randomize=timestamp()),
        title=pad_title(_("Trigger subtitle storage maintenance")),
    ))
    oc.add(DirectoryObject(
        key=Callback(TriggerStorageMigration, randomize=timestamp()),
        title=pad_title(_("Trigger subtitle storage migration (expensive)")),
    ))
    oc.add(DirectoryObject(
        key=Callback(TriggerCacheMaintenance, randomize=timestamp()),
        title=pad_title(_("Trigger cache maintenance (refiners, providers and packs/archives)")),
    ))
    oc.add(DirectoryObject(
        key=Callback(ApplyDefaultMods, randomize=timestamp()),
        title=pad_title(_("Apply configured default subtitle mods to all (active) stored subtitles")),
    ))
    oc.add(DirectoryObject(
        key=Callback(ReApplyMods, randomize=timestamp()),
        title=pad_title(_("Re-Apply mods of all stored subtitles")),
    ))
    oc.add(DirectoryObject(
        key=Callback(LogStorage, key="tasks", randomize=timestamp()),
        title=pad_title(_("Log the plugin's scheduled tasks state storage")),
    ))
    oc.add(DirectoryObject(
        key=Callback(LogStorage, key="ignore", randomize=timestamp()),
        title=pad_title(_("Log the plugin's internal ignorelist storage")),
    ))
    # key=None dumps the complete state storage, not a single section
    oc.add(DirectoryObject(
        key=Callback(LogStorage, key=None, randomize=timestamp()),
        title=pad_title(_("Log the plugin's complete state storage")),
    ))
    oc.add(DirectoryObject(
        key=Callback(ResetStorage, key="tasks", randomize=timestamp()),
        title=pad_title(_("Reset the plugin's scheduled tasks state storage")),
    ))
    oc.add(DirectoryObject(
        key=Callback(ResetStorage, key="ignore", randomize=timestamp()),
        title=pad_title(_("Reset the plugin's internal ignorelist storage")),
    ))
    oc.add(DirectoryObject(
        key=Callback(ResetStorage, key="menu_history", randomize=timestamp()),
        title=pad_title(_("Reset the plugin's menu history storage")),
    ))
    oc.add(DirectoryObject(
        key=Callback(InvalidateCache, randomize=timestamp()),
        title=pad_title(_("Invalidate Sub-Zero metadata caches (subliminal)")),
    ))
    oc.add(DirectoryObject(
        key=Callback(ResetProviderThrottle, randomize=timestamp()),
        title=pad_title(_("Reset provider throttle states")),
    ))
    return oc
def DispatchRestart():
    # Fire the actual restart on a 1s timer so the menu response can be
    # delivered to the client first.
    Thread.CreateTimer(1.0, Restart)
@route(PREFIX + '/advanced/restart/trigger')
@debounce
def TriggerRestart(randomize=None):
    """Menu action: show a 'restarting' notice and schedule the plugin restart."""
    set_refresh_menu_state(_("Restarting the plugin"))
    DispatchRestart()
    return fatality(
        header=_("Restart triggered, please wait about 5 seconds"),
        force_title=" ",
        only_refresh=True,
        replace_parent=True,
        no_history=True,
        randomize=timestamp())
@route(PREFIX + '/advanced/restart/execute')
@debounce
def Restart(randomize=None):
    """Ask the Plex plugin host to restart this plugin."""
    Plex[":/plugins"].restart(PLUGIN_IDENTIFIER)
@route(PREFIX + '/storage/reset', sure=bool)
@debounce
def ResetStorage(key, randomize=None, sure=False):
    """Reset one of the plugin's internal storages after a confirmation step.

    key: "tasks", "ignore" or "menu_history"; resetting "tasks" also
    re-initializes the scheduler so the task list is rebuilt.
    """
    if not sure:
        # First click: ask for confirmation; the confirm item calls back
        # into this handler with sure=True.
        oc = SubFolderObjectContainer(
            no_history=True,
            title1=_("Reset subtitle storage"),
            title2=_("Are you sure?"))
        oc.add(DirectoryObject(
            key=Callback(
                ResetStorage,
                key=key,
                sure=True,
                randomize=timestamp()),
            title=pad_title(_("Are you really sure?")),
        ))
        return oc
    reset_storage(key)
    if key == "tasks":
        # reinitialize the scheduler
        scheduler.init_storage()
        scheduler.setup_tasks()
    return AdvancedMenu(
        randomize=timestamp(),
        header=_("Success"),
        message=_("Information Storage (%s) reset", key)
    )
@route(PREFIX + '/storage/log')
def LogStorage(key, randomize=None):
    """Dump the given state storage section (or everything when key is None) to the log."""
    log_storage(key)
    return AdvancedMenu(
        randomize=timestamp(),
        header=_("Success"),
        message=_("Information Storage (%s) logged", key)
    )
@route(PREFIX + '/triggerbetter')
@debounce
def TriggerBetterSubtitles(randomize=None):
    """Run the FindBetterSubtitles task immediately."""
    scheduler.dispatch_task("FindBetterSubtitles")
    return AdvancedMenu(
        randomize=timestamp(),
        header=_("Success"),
        message=_("FindBetterSubtitles triggered")
    )
@route(PREFIX + '/skipbetter')
@debounce
def SkipFindBetterSubtitles(randomize=None):
    """Mark FindBetterSubtitles as just-run, so its next scheduled run is skipped."""
    task = scheduler.task("FindBetterSubtitles")
    task.last_run = datetime.datetime.now()
    return AdvancedMenu(
        randomize=timestamp(),
        header=_("Success"),
        message=_("FindBetterSubtitles skipped")
    )
@route(PREFIX + '/skipram')
@debounce
def SkipRecentlyAddedMissing(randomize=None):
    """Mark SearchAllRecentlyAddedMissing as just-run, skipping its next run."""
    task = scheduler.task("SearchAllRecentlyAddedMissing")
    task.last_run = datetime.datetime.now()
    return AdvancedMenu(
        randomize=timestamp(),
        header=_("Success"),
        message=_("SearchAllRecentlyAddedMissing skipped")
    )
@route(PREFIX + '/triggermaintenance')
@debounce
def TriggerStorageMaintenance(randomize=None):
    """Run the subtitle storage maintenance task immediately."""
    scheduler.dispatch_task("SubtitleStorageMaintenance")
    return AdvancedMenu(
        randomize=timestamp(),
        header=_("Success"),
        message=_("SubtitleStorageMaintenance triggered")
    )
@route(PREFIX + '/triggerstoragemigration')
@debounce
def TriggerStorageMigration(randomize=None):
    """Run the (expensive) subtitle storage migration task immediately."""
    scheduler.dispatch_task("MigrateSubtitleStorage")
    return AdvancedMenu(
        randomize=timestamp(),
        header=_("Success"),
        message=_("MigrateSubtitleStorage triggered")
    )
@route(PREFIX + '/triggercachemaintenance')
@debounce
def TriggerCacheMaintenance(randomize=None):
    """Run the cache maintenance task (refiners, providers, packs/archives)."""
    scheduler.dispatch_task("CacheMaintenance")
    return AdvancedMenu(
        randomize=timestamp(),
        header=_("Success"),
        message=_("TriggerCacheMaintenance triggered")
    )
def apply_default_mods(reapply_current=False, scandir_generic=False):
    """Walk every stored subtitle and (re-)apply subtitle mods.

    reapply_current=False: add the configured default mods that are missing
    from each active ("current") subtitle.
    reapply_current=True: re-run each subtitle's existing mods instead
    (no new mods are added).
    scandir_generic: use the generic directory scanner; set automatically
    on retry when the optimized scan raises OSError.
    """
    storage = get_subtitle_storage()
    subs_applied = 0
    try:
        for fn in storage.get_all_files(scandir_generic=scandir_generic):
            data = storage.load(None, filename=fn)
            if data:
                video_id = data.video_id
                item_type = get_item_kind_from_rating_key(video_id)
                if not item_type:
                    continue
                # parts -> languages -> subtitles; only the "current"
                # subtitle per language is touched.
                for part_id, part in data.parts.iteritems():
                    for lang, subs in part.iteritems():
                        current_sub = subs.get("current")
                        if not current_sub:
                            continue
                        sub = subs[current_sub]
                        if not sub.content:
                            continue
                        current_mods = sub.mods or []
                        if not reapply_current:
                            # only add default mods that aren't applied yet
                            add_mods = list(set(config.default_mods).difference(set(current_mods)))
                            if not add_mods:
                                continue
                        else:
                            if not current_mods:
                                continue
                            # empty add-list with mode="add" re-runs the existing mods
                            add_mods = []
                        try:
                            set_mods_for_part(video_id, part_id, Language.fromietf(lang), item_type, add_mods, mode="add")
                        except:
                            Log.Error("Couldn't set mods for %s:%s: %s", video_id, part_id, traceback.format_exc())
                            continue
                        subs_applied += 1
    except OSError:
        # NOTE(review): retry with the generic scanner. If the retry raises
        # OSError too, this recurses with identical arguments (potential
        # infinite recursion), and the early return skips storage.destroy()
        # below for the current storage instance -- confirm.
        return apply_default_mods(reapply_current=reapply_current, scandir_generic=True)
    storage.destroy()
    Log.Debug("Applied mods to %i items" % subs_applied)
@route(PREFIX + '/applydefaultmods')
@debounce
def ApplyDefaultMods(randomize=None):
    """Apply the configured default mods to all stored subtitles, in the background."""
    # Run on a timer thread so the menu responds immediately.
    Thread.CreateTimer(1.0, apply_default_mods)
    return AdvancedMenu(
        randomize=timestamp(),
        header=_("Success"),
        message=_("This may take some time ...")
    )
@route(PREFIX + '/reapplyallmods')
@debounce
def ReApplyMods(randomize=None):
    """Re-apply the existing mods of all stored subtitles, in the background."""
    Thread.CreateTimer(1.0, apply_default_mods, reapply_current=True)
    return AdvancedMenu(
        randomize=timestamp(),
        header=_("Success"),
        message=_("This may take some time ...")
    )
@route(PREFIX + '/get_logs_link')
def GetLogsLink():
    """Build and display a tokenized URL from which the logs ZIP can be downloaded.

    The link base is derived from the incoming request's Origin/Referer
    headers when possible, otherwise from the server's public IP.
    """
    if not config.plex_token:
        oc = ObjectContainer(
            title2=_("Download Logs"),
            no_cache=True,
            no_history=True,
            header=_("Sorry, feature unavailable"),
            message=_("Universal Plex token not available"))
        return oc
    # try getting the link base via the request in context, first, otherwise use the public ip
    req_headers = Core.sandbox.context.request.headers
    get_external_ip = True
    link_base = ""
    if "Origin" in req_headers:
        link_base = req_headers["Origin"]
        Log.Debug("Using origin-based link_base")
        get_external_ip = False
    elif "Referer" in req_headers:
        parsed = urlparse.urlparse(req_headers["Referer"])
        link_base = "%s://%s%s" % (parsed.scheme, parsed.hostname, (":%s" % parsed.port) if parsed.port else "")
        Log.Debug("Using referer-based link_base")
        get_external_ip = False
    if get_external_ip or "plex.tv" in link_base:
        # a plex.tv relay URL won't reach this server directly; fall back
        # to the public IP on the default Plex port
        ip = Core.networking.http_request("http://www.plexapp.com/ip.php", cacheTime=7200).content.strip()
        link_base = "https://%s:32400" % ip
        Log.Debug("Using ip-based fallback link_base")
    logs_link = "%s%s?X-Plex-Token=%s" % (link_base, PREFIX + '/logs', config.plex_token)
    oc = ObjectContainer(
        title2=logs_link,
        no_cache=True,
        no_history=True,
        header=_("Copy this link and open this in your browser, please"),
        message=logs_link)
    return oc
@route(PREFIX + '/logs')
def DownloadLogs():
    """Zip the plugin logs plus the server log and return them as a download."""
    buff = StringIO.StringIO()
    zip_archive = ZipFile(buff, mode='w', compression=ZIP_DEFLATED)
    logs = sorted(glob.glob(config.plugin_log_path + '*')) + [config.server_log_path]
    for path in logs:
        data = StringIO.StringIO()
        data.write(FileIO.read(path))
        zip_archive.writestr(os.path.basename(path), data.getvalue())
    zip_archive.close()
    return ZipObject(buff.getvalue())
@route(PREFIX + '/invalidatecache')
@debounce
def InvalidateCache(randomize=None):
    """Drop subliminal's metadata cache."""
    # function-scope import: subliminal is only needed for this action
    from subliminal.cache import region
    if config.new_style_cache:
        # new-style cache backends can be cleared wholesale
        region.backend.clear()
    else:
        region.invalidate()
    return AdvancedMenu(
        randomize=timestamp(),
        header=_("Success"),
        message=_("Cache invalidated")
    )
@route(PREFIX + '/pin')
def PinMenu(pin="", randomize=None, success_go_to="channel"):
    """Digit-by-digit PIN entry menu.

    Each digit button calls back into this handler with the digit appended
    to *pin*; once *pin* matches the configured PIN, the menus unlock and
    navigation jumps to the main channel or the advanced menu, depending on
    *success_go_to*.
    """
    oc = ObjectContainer(
        title2=_("Enter PIN number ") + str(len(pin) + 1),
        no_cache=True,
        no_history=True,
        skip_pin_lock=True)
    if pin == config.pin:
        # remember when the PIN was entered; config.pin_correct derives from this
        Dict["pin_correct_time"] = datetime.datetime.now()
        config.locked = False
        if success_go_to == "channel":
            return fatality(
                force_title=_("PIN correct"),
                header=_("PIN correct"),
                no_history=True)
        elif success_go_to == "advanced":
            return AdvancedMenu(randomize=timestamp())
    # one button per digit 0-9, each extending the PIN typed so far
    for i in range(10):
        oc.add(DirectoryObject(
            key=Callback(
                PinMenu,
                randomize=timestamp(),
                pin=pin + str(i),
                success_go_to=success_go_to),
            title=pad_title(str(i)),
        ))
    # "Reset" restarts the entry with an empty PIN
    oc.add(DirectoryObject(
        key=Callback(
            PinMenu,
            randomize=timestamp(),
            success_go_to=success_go_to),
        title=pad_title(_("Reset")),
    ))
    return oc
@route(PREFIX + '/pin_lock')
def ClearPin(randomize=None):
    """Re-lock the menus by forgetting when the PIN was last entered correctly."""
    Dict["pin_correct_time"] = None
    config.locked = True
    return fatality(force_title=_("Menu locked"), header=" ", no_history=True)
@route(PREFIX + '/reset_throttle')
def ResetProviderThrottle(randomize=None):
    """Clear all provider throttle/backoff state and persist the change."""
    Dict["provider_throttle"] = {}
    Dict.Save()
    return AdvancedMenu(
        randomize=timestamp(),
        header=_("Success"),
        message=_("Provider throttles reset")
    )
| StarcoderdataPython |
187277 | #!/usr/bin/env python3
# coding:utf-8
import this  # importing `this` prints the Zen of Python as a side effect

# ROT13-encoded question; this.d maps each ASCII letter to its ROT13 counterpart.
s = "va gur snpr bs jung?"
print('=' * 50)
print(''.join(this.d.get(ch, ch) for ch in s))
| StarcoderdataPython |
3327253 | from datetime import timedelta
from dateutil.relativedelta import relativedelta
class RatingDatesMixin:
    """Date helpers shared by the rating calculations."""

    def get_date(self, rating_date):
        """Return the date two years (730 days) before *rating_date*."""
        two_years = timedelta(days=365 * 2)
        return rating_date - two_years

    def tournament_age(self, end_date, rating_date):
        """Weight (0-100) of a tournament by its age relative to *rating_date*.

        Under one year old: full weight. Between one and two years: the
        weight decays every two months in sevenths of 100. Two years or
        older: zero. See the about page for the detailed description.
        """
        age = relativedelta(rating_date, end_date)
        seventh = (1 / 7) * 100
        if age.years >= 2:
            return 0
        if age.years >= 1:
            steps = int(age.months / 2 + 1)
            return round(100 - steps * seventh, 2)
        return 100
| StarcoderdataPython |
1785906 | # as defined in PyTorch, custom extension
import pathlib
from typing import BinaryIO, List, Optional, Text, Tuple, Union
import torch
from PIL import Image, ImageDraw, ImageFont
from torchvision.utils import make_grid
def save_image(
    tensor: Union[torch.Tensor, List[torch.Tensor]],
    fp: Union[Text, pathlib.Path, BinaryIO],
    nrow: int = 8,
    padding: int = 2,
    normalize: bool = False,
    range: Optional[Tuple[int, int]] = None,
    scale_each: bool = False,
    pad_value: int = 0,
    format: Optional[str] = None,
    label: Optional[str] = None,
    label2: Optional[str] = None,
) -> None:
    """Save a (mini-batch of) image tensor(s) to *fp*, optionally stamping text labels.

    Mirrors torchvision.utils.save_image (``range``/``format`` intentionally
    shadow builtins to match torchvision's historical signature) with two
    extras:
        label:  drawn in the top-right corner (e.g. an iteration counter).
        label2: drawn in the top-left corner (e.g. a model name).
    NOTE(review): label2 is only drawn when label is also given -- the font
    and the ``if label2`` branch live inside the ``if label`` block; confirm
    whether that nesting is intentional.
    """
    grid = make_grid(
        tensor,
        nrow=nrow,
        padding=padding,
        pad_value=pad_value,
        normalize=normalize,
        range=range,
        scale_each=scale_each,
    )
    # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
    ndarr = (
        grid.mul(255)
        .add_(0.5)
        .clamp_(0, 255)
        .permute(1, 2, 0)
        .to("cpu", torch.uint8)
        .numpy()
    )
    im = Image.fromarray(ndarr)
    if label:
        d = ImageDraw.Draw(im)
        fnt = ImageFont.load_default()
        x, _ = im.size
        y = 5
        # iteration label
        w, h = fnt.getsize(label)
        # black backing box, then yellow text, right-aligned
        d.rectangle((x - w - 4, y, x - 2, y + h), fill="black")
        d.text((x - w - 2, y), label, fnt=fnt, fill=(255, 255, 0))
        if label2:
            # model label
            w, h = fnt.getsize(label2)
            d.rectangle((2, y, w + 4, y + h), fill="black")
            d.text((4, y), label2, fnt=fnt, fill=(255, 255, 0))
    im.save(fp, format=format)
| StarcoderdataPython |
3253784 | <reponame>Ashwin-op/Advent_of_Code<filename>2020/Day 13 - Shuttle Search/1.py
with open("input.txt") as fp:
    # Line 1: earliest departure time; line 2: comma-separated bus IDs
    # ('x' entries are out of service and ignored).
    startTime = int(fp.readline().strip())
    busTimes = [int(i) for i in fp.readline().split(',') if i != 'x']
def closestMultiple(n, x):
    """Return the smallest multiple of x that is >= n.

    This is bus x's first departure at or after time n.  The previous
    implementation rounded to the *nearest* multiple, which could fall
    below n and wrongly exclude that bus from the `min(i >= startTime)`
    selection below (e.g. n=10, x=7 gave 7 instead of 14).
    """
    # integer ceiling division, then scale back up
    return ((n + x - 1) // x) * x
multiples = [closestMultiple(startTime, i) for i in busTimes]
# Earliest departure at/after startTime; index back into busTimes (parallel
# lists) to recover the bus ID for the answer: wait_time * bus_id.
busToTake = min(i for i in multiples if i >= startTime)
print((busToTake-startTime)*(busTimes[multiples.index(busToTake)]))
| StarcoderdataPython |
1787092 | <filename>inbm/dispatcher-agent/dispatcher/device_manager/constants.py<gh_stars>1-10
"""
Constants for DeviceManager classes
Copyright (C) 2017-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
# Linux specific constants
LINUX_POWER = "/sbin/shutdown "  # trailing space: the flag below is appended directly
LINUX_RESTART = "-r"
LINUX_SHUTDOWN = "-h"
LINUX_SECRETS_FILE = "/var/intel-manageability/secret.img"
# Windows specific constants
WIN_POWER = "shutdown "  # trailing space: the flag below is appended directly
WIN_RESTART = "/r"
WIN_SHUTDOWN = "/s"
# Success messages
SUCCESS_RESTART = "Restart Command Success"
SUCCESS_SHUTDOWN = "Shutdown Success"
SUCCESS_DECOMMISSION = "Decommission Success"
| StarcoderdataPython |
3241481 | <filename>gameinterface/minecraftinterface.py<gh_stars>0
import win32gui
import keyboard
import time
import pyautogui
import d3dshot
from PIL import Image
target_size = (640, 480)
target_location = (0, 0)
screen_grab_location_offset = (9, 34)
screen_grab_size_offset = (-9, -7)
default_region = (9, 34, 631, 473)
def window_enumeration_handler(hwnd, top_windows):
    """Win32 EnumWindows callback: record each window's handle/title/geometry.

    Appends a dict with keys 'app' ((hwnd, title)), 'location' ((x, y)) and
    'size' ((width, height)) to *top_windows*.
    """
    left, top, right, bottom = win32gui.GetWindowRect(hwnd)
    top_windows.append({
        'app': (hwnd, win32gui.GetWindowText(hwnd)),
        'location': (left, top),
        'size': (right - left, bottom - top),
    })
class Win10MinecraftApp:
    """Interface to the Windows 10 Minecraft window.

    Wraps d3dshot for continuous capture of the game region and the
    `keyboard` module for reading and injecting key state.
    """

    def __init__(self):
        # Keep a rolling buffer of the last 100 captured frames as numpy arrays.
        self.d = d3dshot.create(frame_buffer_size=100, capture_output="numpy")
        self.d.capture(region=default_region)

    def move_mc(self):
        """Find the Minecraft window, move/resize it to the target geometry
        and bring it to the foreground."""
        self.top_windows = []
        self.minecraft = []
        win32gui.EnumWindows(window_enumeration_handler, self.top_windows)
        for window in self.top_windows:
            if window['app'][1].lower() == "minecraft":
                self.minecraft = window
                break
        if not self.minecraft:
            # NOTE(review): a missing window is deliberately ignored here
            # (the original raise was commented out), so MoveWindow below
            # will fail on the empty list -- confirm desired behavior.
            pass
        win32gui.MoveWindow(self.minecraft['app'][0], target_location[0], target_location[1],
                            target_size[0], target_size[1], True)
        x0, y0, x1, y1 = win32gui.GetWindowRect(self.minecraft['app'][0])
        self.minecraft['location'] = (x0, y0)
        self.minecraft['size'] = (x1 - x0, y1 - y0)
        win32gui.SetForegroundWindow(self.minecraft['app'][0])
        # Capture region: window rect adjusted for the title bar and borders.
        region = (
            self.minecraft['location'][0] + screen_grab_location_offset[0],
            self.minecraft['location'][1] + screen_grab_location_offset[1],
            self.minecraft['size'][0] + screen_grab_size_offset[0],
            self.minecraft['size'][1] + screen_grab_size_offset[1])
        print(region)
        # self.d.capture( region=region)
        print(f"Minecraft Windows 10 Found, Loc:{self.minecraft['size']}")

    def send_keystroke(self, key_instructions):
        """Execute a list of {'action': ..., 'key': ...} keyboard instructions.

        Supported actions: 'press', 'release' and 'press_and_release'
        (press, hold 0.1s, release).

        Raises:
            Exception: if key_instructions is not a list or an action is unknown.
        """
        if not isinstance(key_instructions, list):
            raise Exception(f"Expected list of keystroke actions, got {type(key_instructions)}")
        for key_instruction in key_instructions:
            if key_instruction['action'] == 'press':
                keyboard.press(key_instruction['key'])
            elif key_instruction['action'] == 'press_and_release':
                keyboard.press(key_instruction['key'])
                time.sleep(0.1)
                keyboard.release(key_instruction['key'])
            elif key_instruction['action'] == 'release':
                keyboard.release(key_instruction['key'])
            else:
                raise Exception("Invalid Instruction to act upon")

    def is_pressed(self, key):
        """Return True if *key* is currently held down."""
        return keyboard.is_pressed(key)

    def get_screen(self, frame_count=None):
        """Return captured frames.

        With frame_count=None, return the most recent frame.  Otherwise
        return the last *frame_count* frames stacked on the last dimension,
        or None if the buffer doesn't hold enough frames yet.

        Fix: the class previously defined get_screen twice, so the
        zero-argument variant was shadowed and uncallable; merging them
        behind a default argument keeps both call styles working.
        """
        if frame_count is None:
            return self.d.get_latest_frame()
        if len(self.d.frame_buffer) > frame_count:
            return self.d.get_frame_stack(tuple(range(0, frame_count)), stack_dimension="last")
        return None

    def get_screen_and_keys(self, frame_count=None):
        """Return (frames, key states); see get_screen for frame_count semantics.

        Fix: previously defined twice; the duplicate definitions are merged
        the same way as get_screen.
        """
        return self.get_screen(frame_count=frame_count), self.get_keys()

    def get_keys(self):
        """Return [{'w': bool}, {'k': bool}, {'l': bool}, {'space': bool}]."""
        keys_to_get = ['w', 'k', 'l', 'space']
        return [{key: keyboard.is_pressed(key)} for key in keys_to_get]
| StarcoderdataPython |
168080 | <reponame>notabela-org/project-1-photoshare-GarlandQ<gh_stars>1-10
# Generated by Django 3.1.6 on 2021-03-07 07:46
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: renames the feed app's UserProfile model to Profile.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('feed', '0002_auto_20210307_0227'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='UserProfile',
            new_name='Profile',
        ),
    ]
| StarcoderdataPython |
1678174 | <filename>tests/relay_integration/tests.py
from __future__ import absolute_import, print_function
import os
from sentry import eventstore
from django.core.urlresolvers import reverse
from exam import fixture
from sentry.testutils import TransactionTestCase, RelayStoreHelper
from sentry.testutils.helpers.datetime import iso_format, before_now
def get_fixture_path(name):
    """Absolute path of *name* inside this test module's fixtures/ directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, "fixtures", name)


def load_fixture(name):
    """Read and return the text content of the named fixture file."""
    fixture = get_fixture_path(name)
    with open(fixture) as handle:
        return handle.read()
class SentryRemoteTest(RelayStoreHelper, TransactionTestCase):
    # End-to-end: POST an event through Relay and read it back from eventstore.
    @fixture
    def path(self):
        # URL of the legacy store endpoint used for event submission.
        return reverse("sentry-api-store")
    def get_event(self, event_id):
        # Fetch the stored event for this test's project by its id.
        instance = eventstore.get_event_by_id(self.project.id, event_id)
        return instance
    # used to be test_ungzipped_data
    def test_simple_data(self):
        # Round-trip a minimal event and check the message survived ingestion.
        event_data = {"message": "hello", "timestamp": iso_format(before_now(seconds=1))}
        event = self.post_and_retrieve_event(event_data)
        assert event.message == "hello"
| StarcoderdataPython |
69911 | <filename>bridge/users/api.py
#
# Copyright (c) 2019 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from rest_framework.generics import get_object_or_404
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from bridge.access import DataViewPermission
from users.models import DataView, PreferableView
from users.serializers import DataViewSerializer
class DataViewAPIViewSet(ModelViewSet):
    # CRUD API for the current user's saved data views, addressed by view type.
    permission_classes = (DataViewPermission,)
    serializer_class = DataViewSerializer
    lookup_url_kwarg = 'type'
    def get_serializer(self, *args, **kwargs):
        # Restrict serializer fields per HTTP method: GET lets the client
        # pick fields via ?fields=..., POST allows the creation set, and
        # updates may only change 'shared' and 'view'.
        if self.request.method == 'GET':
            fields = self.request.query_params.getlist('fields')
        elif self.request.method == 'POST':
            fields = {'shared', 'name', 'view', 'type'}
        else:
            fields = {'shared', 'view'}
        return super().get_serializer(*args, fields=fields, **kwargs)
    def get_queryset(self):
        # Users can only see and modify their own views.
        return DataView.objects.filter(author=self.request.user)
class PreferViewAPIView(APIView):
    # Manage the user's single "preferred" view per view type.
    def delete(self, request, view_type):
        # Clear the preference for the given view type.
        PreferableView.objects.filter(view__type=view_type, user=request.user).delete()
        return Response({})
    def post(self, request, view_id):
        # Make the given (own) view the preferred one for its type,
        # replacing any previous preference of the same type.
        view = get_object_or_404(DataView, id=view_id, author=request.user)
        PreferableView.objects.filter(view__type=view.type, user=request.user).delete()
        PreferableView.objects.create(view=view, user=request.user)
        return Response({})
| StarcoderdataPython |
1613383 | <reponame>terryyylim/feast<gh_stars>0
import os
import random
import string
import tempfile
from datetime import datetime, timedelta
import click
import pyarrow as pa
from tqdm import tqdm
from feast.data_source import FileSource
from feast.entity import Entity
from feast.feature import Feature
from feast.feature_store import FeatureStore, _convert_arrow_to_proto
from feast.feature_view import FeatureView
from feast.repo_config import RepoConfig
from feast.value_type import ValueType
from tests.driver_test_data import create_driver_hourly_stats_df
def create_driver_hourly_stats_feature_view(source):
    """Declare the driver_stats feature view (keyed by driver_id, 2h TTL) over *source*."""
    driver_stats_feature_view = FeatureView(
        name="driver_stats",
        entities=["driver_id"],
        features=[
            Feature(name="conv_rate", dtype=ValueType.FLOAT),
            Feature(name="acc_rate", dtype=ValueType.FLOAT),
            Feature(name="avg_daily_trips", dtype=ValueType.INT32),
        ],
        input=source,
        ttl=timedelta(hours=2),
    )
    return driver_stats_feature_view
def create_driver_hourly_stats_source(parquet_path):
    """Parquet-file source for driver stats; 'datetime'/'created' are the
    event-timestamp and created-timestamp columns."""
    return FileSource(
        path=parquet_path,
        event_timestamp_column="datetime",
        created_timestamp_column="created",
    )
@click.command(name="run")
def benchmark_writes():
    """Benchmark online-store writes: 14 days of hourly stats for 100 drivers.

    Creates a throwaway Feast project (randomized name, GCP provider),
    registers the entity and feature view, writes the generated rows with a
    progress bar, then tears the provisioned infrastructure down again.
    """
    project_id = "test" + "".join(
        random.choice(string.ascii_lowercase + string.digits) for _ in range(10)
    )
    with tempfile.TemporaryDirectory() as temp_dir:
        store = FeatureStore(
            config=RepoConfig(
                registry=os.path.join(temp_dir, "registry.db"),
                project=project_id,
                provider="gcp",
            )
        )
        # This is just to set data source to something, we're not reading from parquet source here.
        parquet_path = os.path.join(temp_dir, "data.parquet")
        driver = Entity(name="driver_id", value_type=ValueType.INT64)
        table = create_driver_hourly_stats_feature_view(
            create_driver_hourly_stats_source(parquet_path=parquet_path)
        )
        store.apply([table, driver])
        provider = store._get_provider()
        end_date = datetime.utcnow()
        start_date = end_date - timedelta(days=14)
        customers = list(range(100))
        data = create_driver_hourly_stats_df(customers, start_date, end_date)
        # Show the data for reference
        print(data)
        # Convert the pandas frame to the protobuf rows the online store expects.
        proto_data = _convert_arrow_to_proto(pa.Table.from_pandas(data), table)
        # Write it
        with tqdm(total=len(proto_data)) as progress:
            provider.online_write_batch(
                project=store.project,
                table=table,
                data=proto_data,
                progress=progress.update,
            )
        registry_tables = store._get_registry().list_feature_views(
            project=store.project
        )
        # Clean up everything that was provisioned for the benchmark.
        provider.teardown_infra(store.project, tables=registry_tables)
if __name__ == "__main__":
    benchmark_writes()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.