| text (string, lengths 4 – 1.02M) | meta (dict) |
|---|---|
import tweepy
import json
import codecs
import sys
busqueda=""
class Twitter_tiempo_real:
#Constructor
def __init__(self,consumer_key,consumer_secret,access_token_key,access_token_secret):
self.consumer_key = consumer_key
self.consumer_secret =consumer_secret
self.access_token_key = access_token_key
self.access_token_secret = access_token_secret
try:
self.auth1 = tweepy.OAuthHandler(self.consumer_key, self.consumer_secret)
self.auth1.set_access_token(self.access_token_key, self.access_token_secret)
        except Exception:
            # __init__ must not return a value; swallow the error and leave
            # the auth handler unset on failure.
            self.auth1 = None
def obtencion_de_tweets(self,lista):
'''
Gets Tweets about a specific topic from the stream
In:
(lista, list) list of topics
Out:
            Nothing is returned; the collected tweets are written to a file called tweet.txt
'''
try:
self.l = StreamListener()
streamer = tweepy.Stream(auth=self.auth1, listener=self.l, timeout=60)
streamer.filter(track = lista, languages=['es'])
        except Exception:
            # Swallow streaming errors, mirroring the original behaviour.
            return ""
class StreamListener(tweepy.StreamListener):
    # File where the collected tweets will be stored
archivoEscritura=codecs.open("tweet.txt","a","utf-8")
numero_tweets=1000
    # Counter of tweets processed so far
contador = 1
def on_status(self, tweet):
'''
status from the stream
In:
(tweet, text) text string
Out:
'''
print "------------------"
def on_error(self, status_code):
'''
errors from the stream
In:
(status_code, text) text string with the error code
Out:
'''
print 'Error: ' + repr(status_code)
return False
def on_data(self, data):
'''
data from the stream(tweets about a topic)
In:
(data, JSON format) JSON with the tweets from the stream
Out:
'''
if (self.contador <= self.numero_tweets):
diccionario={}
print "encontrando tweet numero "+str(self.contador)
diccionario=json.loads(data)
tweet=(diccionario["text"]).replace("\n", "")
cadena=','.join(busqueda) +"/|/"+diccionario["created_at"]+"/|/"+diccionario["lang"]+"/|/"+diccionario["user"]["name"]+"/|/"+tweet+"/|/"+str(diccionario["user"]["friends_count"])+"/|/"+str(diccionario["user"]["followers_count"])+"\n"
self.archivoEscritura.write(cadena);
self.contador += 1
else:
self.archivoEscritura.close();
return False
def on_timeout(self):
        '''
        Checks whether the timeout limit has been reached
        In:
        Out:
        '''
        print 'Tiempo fuera, no se encontro mas informacion en el tiempo establecido como limite...'
        self.archivoEscritura.close()
        return False
'''
usage example
consumer_key = 'tPTSY5fz1l8jTBamlsg'
consumer_secret = 'IkGZgb7RXY6hlINW4yXDDo078LfikNqYnybMeAwkhI'
access_token_key = '2255882202-OQWZ7gz3HZipE7Cas0oeJKRPA8vF8HJT4edyNgq'
access_token_secret = '6JpyzfF18GGMEFToWVvvkxz0Zv8SeTM99Eoxnchz67PS0'
prueba=Twitter_tiempo_real(consumer_key,consumer_secret,access_token_key,access_token_secret)
busqueda=["mujeres","emprendedoras"]
prueba.obtencion_de_tweets(busqueda)
'''
|
{
"content_hash": "207d93b8a415f260165fc49f4739a3b7",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 247,
"avg_line_length": 32.48031496062992,
"alnum_prop": 0.5204848484848484,
"repo_name": "Ernesttt/content_analysis",
"id": "2934c217c96595269d4cabf9e39fc6a4c9850bfc",
"size": "4125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "content_analysis/wrappers/twitter_rt.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "113180"
}
],
"symlink_target": ""
}
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import initializers
from chainer.links.connection import deconvolution_nd
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.testing import parameterize
from chainer.utils import conv
@parameterize(*testing.product({
'dims': [(3, 2), (2,)],
'nobias': [True, False],
'dtype': [numpy.float32],
'use_cudnn': ['always', 'auto', 'never'],
'used_outsize': ['case1', 'case2', 'None'],
}) + testing.product({
'dims': [(4, 3, 2)],
'nobias': [False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_cudnn': ['always'],
'used_outsize': ['None'],
}))
class TestDeconvolutionND(unittest.TestCase):
def setUp(self):
N = 2
in_channels = 3
out_channels = 2
ndim = len(self.dims)
ksize = (3,) * ndim
stride = (2,) * ndim
pad = (1,) * ndim
if self.used_outsize == 'case1' or self.used_outsize == 'None':
# Use output size determined with get_deconv_outsize.
outs = tuple(
conv.get_deconv_outsize(d, k, s, p)
for (d, k, s, p) in zip(self.dims, ksize, stride, pad))
elif self.used_outsize == 'case2':
# Use possible output size other than the one determined with
# get_deconv_outsize.
outs = tuple(
conv.get_deconv_outsize(d, k, s, p) + 1
for (d, k, s, p) in zip(self.dims, ksize, stride, pad))
if self.used_outsize != 'None':
outsize = outs
else:
outsize = None
if not self.nobias:
initial_bias = initializers.Uniform(scale=1, dtype=self.dtype)
else:
initial_bias = None
self.link = deconvolution_nd.DeconvolutionND(
ndim, in_channels, out_channels, ksize, stride=stride, pad=pad,
outsize=outsize, initial_bias=initial_bias, nobias=self.nobias)
self.link.cleargrads()
x_shape = (N, in_channels) + self.dims
self.x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
gy_shape = (N, out_channels) + outs
self.gy = numpy.random.uniform(-1, 1, gy_shape).astype(self.dtype)
self.check_forward_options = {}
self.check_backward_options = {
'eps': 1e-2, 'atol': 1e-4, 'rtol': 1e-3}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-2}
self.check_backward_options = {
'eps': 2 ** -3, 'atol': 1e-2, 'rtol': 1e-1}
def check_forward_consistency(self, link, x_data):
x_cpu = chainer.Variable(x_data)
y_cpu = link(x_cpu)
self.assertEqual(y_cpu.data.dtype, x_data.dtype)
link.to_gpu()
x_gpu = chainer.Variable(cuda.to_gpu(x_data))
y_gpu = link(x_gpu)
self.assertEqual(y_gpu.data.dtype, x_data.dtype)
testing.assert_allclose(
y_cpu.data, y_gpu.data, **self.check_forward_options)
@attr.gpu
@condition.retry(3)
def test_forward_consistency(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_forward_consistency(self.link, self.x)
def check_backward(self, link, x_data, y_grad):
params = [link.W]
if not self.nobias:
params.append(link.b)
gradient_check.check_backward(
link, x_data, y_grad, params, **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.link, self.x, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.link.to_gpu()
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_backward(
self.link, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestDeconvolutionNDNoInitialBias(unittest.TestCase):
def test_no_initial_bias(self):
ndim = 3
ksize = 3
link = deconvolution_nd.DeconvolutionND(
ndim, 3, 2, ksize, nobias=True)
self.assertIsNone(link.b)
testing.run_module(__name__, __file__)
|
{
"content_hash": "fafe398e76f5ca367616a614e5493c7b",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 75,
"avg_line_length": 32.74809160305343,
"alnum_prop": 0.5885780885780886,
"repo_name": "aonotas/chainer",
"id": "12cf14d2381b5a0eafac46d5aa94f6db9ead221a",
"size": "4290",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/links_tests/connection_tests/test_deconvolution_nd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3357320"
}
],
"symlink_target": ""
}
|
"""Logic to update a TensorFlow model graph with quantization operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.quantize.python import common
from tensorflow.contrib.quantize.python import graph_matcher
from tensorflow.contrib.quantize.python import input_to_ops
from tensorflow.contrib.quantize.python import quant_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
# Quantizable operation types that are supported by the quantization rewrite.
_QUANTIZABLE_TYPES = {'Conv2D', 'MatMul', 'DepthwiseConv2dNative'}
# Activations that are supported by the quantization rewrite.
_ACTIVATION_TYPES = {'Relu', 'Relu6', 'Identity'}
_RELU_TYPES = {'Relu', 'Relu6'}
_QUANTIZATION_OP = {'FakeQuantWithMinMaxVars'}
_VALID_SRC_OP = {'Add', 'Mul'}
_INTERMEDIATE_OP = {'Add', 'Mul'}
_PASS_THROUGH_OP = {'Reshape', 'Identity', 'BatchToSpaceND', 'SpaceToBatchND'}
_VALID_ACTIVATION_OP = {'Relu', 'Relu6'}
def Quantize(graph,
is_training,
weight_bits=8,
activation_bits=8,
symmetric=False,
ema_decay=0.999,
quant_delay=None,
vars_collection=ops.GraphKeys.GLOBAL_VARIABLES,
scope=None):
"""Updates graph with quantization operations.
Currently we quantize the following tensors:
* Conv/MatMul: Quantize the weights if it matches.
* Activation: Quantize the output if it matches.
* Bypass/Post-activation Bypass: Quantize both input and output
if it matches.
Args:
graph: Graph to modify.
is_training: Whether quantizing training graph or eval graph.
weight_bits: Number of bits to use for quantizing weights.
activation_bits: Number of bits to use for quantizing activations.
symmetric: (Optional) If true, use symmetric quantization limits instead of
training the minimum and maximum of each quantization range separately.
ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
quantization intervals for quantizing activations (see here about EMA:
https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
quant_delay: (Optional, default None) Int, count of global steps for which
to delay quantization. This helps weights stabilize at the start of
training.
vars_collection: (Optional) Collection where to store the variables for
quantization interval ends.
scope: The scope to be transformed. If it's not None, only the ops which
are in this scope will be transformed.
Raises:
ValueError: When quantization fails.
"""
if scope and not scope.endswith('/'):
scope += '/'
input_to_ops_map = input_to_ops.InputToOps(graph)
quantized_ops = set()
for layer_match in _FindLayersToQuantize(graph):
# Quantize the weights.
context = _GetContextFromOp(layer_match.layer_op)
# If `scope` is given, only quantize it if the consumer of weights
# (the layer op) is in the right scope.
_InsertQuantOp(
context,
'weights_quant',
layer_match.weight_tensor.op,
input_to_ops_map.ConsumerOperations(layer_match.weight_tensor.op),
is_training,
moving_avg=False,
ema_decay=ema_decay,
quant_delay=quant_delay,
narrow_range=True,
vars_collection=vars_collection,
bits=weight_bits,
symmetric=symmetric,
consumer_scope=scope)
# Quantize the activations.
consumer_ops = input_to_ops_map.ConsumerOperations(
layer_match.activation_op)
add_context = context
if layer_match.bypass_op:
pattern_match_result = re.search(r'^(.*)/([^/]+)', context)
if pattern_match_result is not None:
add_context = pattern_match_result.group(1)
else:
add_context = ''
# If `scope` is given, only quantize it if the producer of weights
# (usually it's the layer op) is in the right scope.
_InsertQuantOp(
add_context,
'act_quant',
layer_match.activation_op,
consumer_ops,
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
symmetric=symmetric,
init_min=0.0,
producer_scope=scope)
quantized_ops.add(layer_match.activation_op)
# Quantize the inputs and output to the bypass (if it exists). The input to
# the bypass is the bias add, and the output is the activation.
if layer_match.bypass_op is not None:
      # If `scope` is given, only quantize it if both the producer and the
      # consumer are in the right scope.
_InsertQuantOp(
context,
'conv_quant',
layer_match.bias_add_op,
input_to_ops_map.ConsumerOperations(layer_match.bias_add_op),
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
symmetric=symmetric,
producer_scope=scope,
consumer_scope=scope)
quantized_ops.add(layer_match.bias_add_op)
      # Make sure the op following this isn't an activation; in that case we
      # shouldn't quantize it, since the activation will be fused into the
      # Add at inference time.
      consumers = input_to_ops_map.ConsumerOperations(layer_match.bypass_op)
      if any([consumer.type in _ACTIVATION_TYPES for consumer in consumers]):
        logging.info('Skipping %s, because it is followed by an activation.',
                     layer_match.bypass_op.name)
else:
_InsertQuantOp(
add_context,
'add_quant',
layer_match.bypass_op,
input_to_ops_map.ConsumerOperations(layer_match.bypass_op),
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
symmetric=symmetric,
producer_scope=scope,
consumer_scope=scope)
quantized_ops.add(layer_match.bypass_op)
# Quantize bypass ops that occur after the activation.
if layer_match.post_activation_bypass_op is not None:
pattern_match_result = re.search(
r'^(.*)/([^/]+)', layer_match.post_activation_bypass_op.name)
if pattern_match_result is not None:
post_activation_bypass_context = pattern_match_result.group(1)
else:
post_activation_bypass_context = ''
# If `scope` is given, only quantize it if the producer is in the right
# scope.
      # Make sure the op following this isn't an activation; in that case we
      # shouldn't quantize it, since the activation will be fused into the
      # Add at inference time.
      consumers = input_to_ops_map.ConsumerOperations(
          layer_match.post_activation_bypass_op)
      if any([consumer.type in _RELU_TYPES for consumer in consumers]):
        logging.info('Skipping %s, because it is followed by an activation.',
                     layer_match.post_activation_bypass_op.name)
else:
_InsertQuantOp(
post_activation_bypass_context,
'post_activation_bypass_quant',
layer_match.post_activation_bypass_op,
consumers,
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
symmetric=symmetric,
producer_scope=scope)
quantized_ops.add(layer_match.post_activation_bypass_op)
_QuantizeActivationLayers(
quantized_ops,
graph,
is_training,
activation_bits,
ema_decay,
quant_delay,
vars_collection,
scope=scope)
def _QuantizeActivationLayers(quantized_ops,
graph,
is_training,
activation_bits=8,
ema_decay=0.999,
quant_delay=None,
vars_collection=ops.GraphKeys.GLOBAL_VARIABLES,
scope=None):
"""Quantize intermediate activation tensors after addition and multiplication.
Args:
quantized_ops: Set of previously quantized activation ops.
graph: Graph to modify.
is_training: Whether quantizing training graph or eval graph.
activation_bits: Number of bits to use for quantizing activations.
ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
quantization intervals for quantizing activations (see here about EMA:
https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
quant_delay: (Optional, default None) Int, count of global steps for which
to delay quantization. This helps weights stabilize at the start of
training.
vars_collection: (Optional) Collection where to store the variables for
quantization interval ends.
scope: The scope to be transformed. If it's not None, only the ops which are
in this scope will be transformed.
Raises:
ValueError: When quantization fails.
"""
input_to_ops_map = input_to_ops.InputToOps(graph)
  for op in graph.get_operations():
if _CheckIfQuantizableOp(op, quantized_ops):
logging.info('Inserting fake quant op activation_%s_quant after %s',
op.type, op.name)
consumers = input_to_ops_map.ConsumerOperations(op)
_InsertQuantOp(
op.name,
'activation_' + op.type + '_quant',
op,
consumers,
is_training,
moving_avg=True,
ema_decay=ema_decay,
quant_delay=quant_delay,
vars_collection=vars_collection,
bits=activation_bits,
producer_scope=scope)
def _CheckIfQuantizableOp(src_op, quantized_ops):
"""Check if the output of an op should be quantized.
Args:
src_op: op to be checked
quantized_ops: Set of previously quantized activation ops.
Returns:
Boolean specifying if output should be quantized or not.
"""
src_op_name = set([src_op.type])
if src_op in quantized_ops:
return False
if not src_op_name.intersection(_VALID_SRC_OP):
return False
  # If the src op is an Add or a Mul and its output is immediately
  # followed by an activation, skip it.
if len(src_op.outputs) == 1 and len(src_op.outputs[0].consumers()) == 1:
op_consumers = src_op.outputs[0].consumers()
if set([op_consumers[0].type]).intersection(_VALID_ACTIVATION_OP):
logging.info('Skipping quant after %s', src_op.name)
return False
# Is an Add or a Mul
input_ops = src_op.inputs
for op in input_ops:
curr_op = op.op
curr_op_type = set([curr_op.type])
while curr_op_type.intersection(_PASS_THROUGH_OP):
# Walk back through pass through ops
curr_op = curr_op.inputs[0].op
curr_op_type = set([curr_op.type])
    # Now at a valid or quantizable op; check whether at least one of the
    # inputs to a valid op is connected to a quantizable op via pass-through
    # ops.
if (curr_op_type.intersection(_QUANTIZATION_OP) or
curr_op.name.find('delayed_quant/Merge') > 0):
return True
if curr_op_type.intersection(_INTERMEDIATE_OP):
      # Check if at least one input to the intermediate op is quantizable
for input_op in curr_op.inputs:
if _CheckIfQuantizableOp(input_op.op, quantized_ops):
return True
return False
def _FindLayersToQuantize(graph):
"""Matches layers in graph to quantize.
The following patterns get matched. Nodes surrounded by [] will be
optionally matched:
weight|folded_weight
/
conv|fc
|
[batch_to_space_nd]
|
[post_conv_correction]
|
[biasadd|folded_bias]
|
[bypass]
|
activation
|
[post_activation_bypass]
Match replacements:
If weight|folded_weight is found, FakeQuant is added afterwards.
If bypass is found, FakeQuant is added before and after.
If activation is found, FakeQuant is added afterwards.
If post_activation_bypass is found, FakeQuant is added afterwards.
Args:
graph: Graph to perform match on.
Returns:
list of _LayerMatches.
"""
input_pattern = graph_matcher.OpTypePattern('*')
weight_var_pattern = graph_matcher.OpTypePattern('Variable|VariableV2')
weight_partition_identity_pattern = graph_matcher.OpTypePattern(
'Identity', inputs=[weight_var_pattern])
weight_partition_concat_pattern = graph_matcher.OpTypePattern(
'ConcatV2', inputs=[weight_partition_identity_pattern, '*', '*'])
weight_identity_pattern = graph_matcher.OpTypePattern(
'Identity',
inputs=[
graph_matcher.OneofPattern([
weight_partition_identity_pattern,
weight_partition_concat_pattern,
weight_var_pattern,
])
])
weight_resource_var_pattern = graph_matcher.OpTypePattern('ReadVariableOp')
folded_weight_pattern = graph_matcher.OpTypePattern('Mul')
  # The weight input to the layer operation can come either from the Variable
  # or from the folded weight (Mul).
layer_pattern = graph_matcher.OpTypePattern(
'|'.join(_QUANTIZABLE_TYPES),
inputs=[
input_pattern,
graph_matcher.OneofPattern([
weight_identity_pattern, weight_resource_var_pattern,
folded_weight_pattern
])
],
ordered_inputs=False)
# For atrous convolutions a BatchToSpaceND will occur after the depthwise
# convolution.
batch_to_space_pattern = graph_matcher.OpTypePattern(
'BatchToSpaceND',
inputs=[
layer_pattern,
graph_matcher.OpTypePattern('*'),
graph_matcher.OpTypePattern('*')
])
layer_output_pattern = graph_matcher.OneofPattern(
[batch_to_space_pattern, layer_pattern])
# For separable convolutions, we are looking for a conv, followed by a conv
# with no activations between the two.
sep_conv_pattern = graph_matcher.OpTypePattern(
'|'.join(_QUANTIZABLE_TYPES),
inputs=[
graph_matcher.OneofPattern([layer_output_pattern]),
graph_matcher.OpTypePattern('*')
],
ordered_inputs=False)
folded_bias_mul_pattern = graph_matcher.OpTypePattern(
'Mul',
inputs=[graph_matcher.OpTypePattern('*'), layer_output_pattern],
ordered_inputs=False)
post_layer_op_correction_pattern = graph_matcher.OpTypePattern(
'Add',
inputs=[folded_bias_mul_pattern,
graph_matcher.OpTypePattern('*')],
ordered_inputs=False)
folded_bias_add_pattern = graph_matcher.OpTypePattern(
'Add',
inputs=[
post_layer_op_correction_pattern,
graph_matcher.OpTypePattern('*')
],
ordered_inputs=False)
# batch_norms with forced updates have an Identity operation at the end.
# TODO(suharshs): Find a way to easily skip extra Identity operations. The
# current issue is that doing so can often match patterns across many layers
# incorrectly.
batch_norm_identity = graph_matcher.OpTypePattern(
'Identity', inputs=[folded_bias_add_pattern])
bias_add_pattern = graph_matcher.OpTypePattern(
'Add|BiasAdd', inputs=[layer_output_pattern, '*'], ordered_inputs=False)
# The bias can come from the bias add or the folded bias add.
bypass_pattern = graph_matcher.OpTypePattern(
'Add',
inputs=[
graph_matcher.OneofPattern(
[bias_add_pattern, folded_bias_add_pattern, batch_norm_identity]),
'*'
],
ordered_inputs=False)
  # The input to the activation can come from the bias add, the folded bias
  # add, or the bypasses.
# TODO(suharshs): We should ideally skip Identity operations instead of
# treating them as activations.
activation_pattern = graph_matcher.OpTypePattern(
'|'.join(_ACTIVATION_TYPES) + '|Identity',
inputs=[
graph_matcher.OneofPattern([
bias_add_pattern,
folded_bias_add_pattern,
batch_norm_identity,
bypass_pattern,
layer_pattern,
])
])
post_activation_bypass_pattern = graph_matcher.OpTypePattern(
'Add', inputs=['*', activation_pattern], ordered_inputs=False)
# The order of the following matching blocks is very important. Since matches
# aren't guaranteed to be disjoint, we structure matches from largest to
# smallest to guarantee that the largest match always wins. Additionally, we
# ensure that we don't match layers multiple times.
layer_matches = []
# We use matched_layer_set to ensure that layers aren't matched multiple
# times.
matched_layer_set = set()
# First, we match layers that have a post activation bypass. We do this first
# to ensure we don't match only the first part of this layer, missing the
# post activation bypass node.
post_activation_bypass_layer_matcher = graph_matcher.GraphMatcher(
post_activation_bypass_pattern)
for match_result in post_activation_bypass_layer_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(folded_weight_pattern)
activation_op = match_result.get_op(activation_pattern)
bias_add_op = match_result.get_op(bias_add_pattern)
if bias_add_op is None:
bias_add_op = match_result.get_op(folded_bias_add_pattern)
bypass_op = match_result.get_op(bypass_pattern)
post_activation_bypass_op = match_result.get_op(
post_activation_bypass_pattern)
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, bypass_op,
post_activation_bypass_op, bias_add_op))
# Now, we match the basic layer ending at an activation. We may get duplicate
# matches from above, but we don't add them to layer_matches.
layer_matcher = graph_matcher.GraphMatcher(activation_pattern)
for match_result in layer_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(folded_weight_pattern)
activation_op = match_result.get_op(activation_pattern)
bias_add_op = match_result.get_op(bias_add_pattern)
if bias_add_op is None:
bias_add_op = match_result.get_op(folded_bias_add_pattern)
bypass_op = match_result.get_op(bypass_pattern)
if layer_op not in matched_layer_set:
if not _IsSkipLayer(activation_op):
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, bypass_op, None,
bias_add_op))
# Match the final layer, where there may not be an activation and instead
# the output of the final BiasAdd must be quantized. So we treat the BiasAdd
  # as the 'activation_op' in the _LayerMatch, to ensure that its output is
  # quantized.
final_layer_matcher = graph_matcher.GraphMatcher(
graph_matcher.OneofPattern([bias_add_pattern, folded_bias_add_pattern]))
for match_result in final_layer_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(weight_resource_var_pattern)
if weight_tensor is None:
weight_tensor = match_result.get_tensor(folded_weight_pattern)
activation_op = match_result.get_op(bias_add_pattern)
if activation_op is None:
activation_op = match_result.get_op(folded_bias_add_pattern)
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, None, None, None))
# Look for separable convolutions here
sep_conv_matcher = graph_matcher.GraphMatcher(sep_conv_pattern)
for match_result in sep_conv_matcher.match_graph(graph):
layer_op = match_result.get_op(layer_pattern)
weight_tensor = match_result.get_tensor(weight_identity_pattern)
activation_op = match_result.get_op(layer_pattern)
if layer_op not in matched_layer_set:
matched_layer_set.add(layer_op)
layer_matches.append(
_LayerMatch(layer_op, weight_tensor, activation_op, None, None, None))
return layer_matches
def _IsSkipLayer(activation_op):
"""Skip quantizing conv->identity->Batch norm layers.
Args:
activation_op: Activation op detected by layer matching pattern
Returns:
skip_layer: boolean, true when conv->identity->batch norm is detected.
"""
# Exclude quantization of conv->identity->BN,
# After folding, this part corresponds to estimation of mean and variance
# and should not be quantized.
skip_layer = False
if activation_op.type == 'Identity' and len(activation_op.outputs) == 1:
if len(activation_op.outputs[0].consumers()) == 1:
consumer = activation_op.outputs[0].consumers()[0]
if consumer.type == 'FusedBatchNorm':
skip_layer = True
logging.info(
'Skipping quantizing %s, because it is the output of a conv/fc '
          'followed by an identity, feeding a fused batch norm.',
activation_op.name)
return skip_layer
class _LayerMatch(object):
"""Contains all information related to a matched Layer."""
def __init__(self, layer_op, weight_tensor, activation_op, bypass_op,
post_activation_bypass_op, bias_add_op):
self._layer_op = layer_op
self._weight_tensor = weight_tensor
self._activation_op = activation_op
self._bypass_op = bypass_op
self._post_activation_bypass_op = post_activation_bypass_op
self._bias_add_op = bias_add_op
@property
def layer_op(self):
return self._layer_op
@property
def weight_tensor(self):
return self._weight_tensor
@property
def activation_op(self):
return self._activation_op
@property
def bypass_op(self):
return self._bypass_op
@property
def post_activation_bypass_op(self):
return self._post_activation_bypass_op
@property
def bias_add_op(self):
return self._bias_add_op
def _FollowedByFakeQuant(tensor):
"""Returns True if the tensor is followed by a FakeQuant."""
fake_quant_ops = set([
'FakeQuantWithMinMaxVars', 'FakeQuantWithMinMaxArgs',
'FakeQuantWithMinMaxVarsPerChannel'
])
pass_through_ops = set(['Reshape', 'Identity'])
consumers = tensor.consumers()
while consumers:
c = consumers.pop()
if c.type in fake_quant_ops:
return True
elif c.type in pass_through_ops:
for output in c.outputs:
consumers.extend(output.consumers())
return False
def _InsertQuantOp(context,
name,
producer,
consumers,
is_training,
moving_avg=True,
init_min=-6.0,
init_max=6.0,
bits=8,
symmetric=False,
ema_decay=0.999,
quant_delay=None,
vars_collection=ops.GraphKeys.GLOBAL_VARIABLES,
narrow_range=False,
producer_scope=None,
consumer_scope=None):
"""Inserts a quant op between a producer op and (multiple) consumer ops.
Args:
context: Context where producer and consumer operations are nested.
name: Name for the new quantization op within the context.
producer: Producer operation of the pairs where quantization will be
inserted.
consumers: Consumer operations of the pairs.
is_training: Whether quantizing training graph or eval graph.
moving_avg: Specifies whether to use exponential moving average or just
the last value seen.
init_min: Starting minimum value for the new quantization op.
init_max: Starting maximum value for the new quantization op.
bits: Number of bits to use for quantization, must be between 2 and 8.
symmetric: (Optional) If true, use symmetric quantization limits instead of
training the minimum and maximum of each quantization range separately.
ema_decay: (Optional) Float, EMA decay parameter. EMA is used to update
quantization intervals for quantizing activations (see here about EMA:
https://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average).
quant_delay: (Optional, default None) Int, count of global steps for which
to delay quantization. This helps weights stabilize at the start of
training.
vars_collection: (Optional) Collection where to store the variables for
quantization interval ends.
narrow_range: Whether to use the narrow quantization range
[1; 2^bits - 1] or wide range [0; 2^bits - 1].
producer_scope: The restriction of producer scope. If not None, the new op
will be inserted only when the producer is in this scope.
    consumer_scope: The restriction of consumer scope. If not None, the new op
      will be inserted only when all the consumers are in this scope.
Raises:
ValueError: When producer operation is not directly connected to the
consumer operation.
"""
if producer_scope and not producer.name.startswith(producer_scope):
logging.info(
'_InsertQuantOp ignores context="%s" name="%s" '
'because producer "%s" is not in scope "%s"',
context, name, producer.name, producer_scope)
return
if consumer_scope:
consumers_in_scope = []
for consumer in consumers:
if consumer.name.startswith(consumer_scope):
consumers_in_scope.append(consumer)
else:
logging.info(
'_InsertQuantOp context="%s" name="%s" ignores '
'consumer "%s" because it is not in scope "%s"',
context, name, consumer.name, consumer_scope)
return
consumers = consumers_in_scope
name_prefix = _AddContextToName(context, name)
# This is needed on TPU where name_scope == 'TPUReplicate/loop', and
# name_prefix starts with 'TPUReplicate/loop/'; without dropping it
# variables are created as TPUReplicate/loop/TPUReplicate/loop/..., which
# breaks things later.
name_scope = ops.get_name_scope()
if name_scope:
name_prefix = common.DropStringPrefix(name_prefix, name_scope + '/')
inputs = producer.outputs[0]
# Prevent ops from being quantized multiple times. Bypass ops can sometimes
# overlap between multiple matches, so we need to ensure that we don't
# add duplicate FakeQuant operations.
if _FollowedByFakeQuant(inputs):
return
if moving_avg:
quant = (
quant_ops.MovingAvgQuantize(
inputs,
init_min=init_min,
init_max=init_max,
ema_decay=ema_decay,
is_training=is_training,
num_bits=bits,
symmetric=symmetric,
narrow_range=narrow_range,
vars_collection=vars_collection,
name_prefix=name_prefix))
else:
quant = (
quant_ops.LastValueQuantize(
inputs,
init_min=init_min,
init_max=init_max,
is_training=is_training,
num_bits=bits,
symmetric=symmetric,
narrow_range=narrow_range,
vars_collection=vars_collection,
name_prefix=name_prefix))
if quant_delay and quant_delay > 0:
activate_quant = math_ops.greater_equal(
common.CreateOrGetQuantizationStep(),
quant_delay,
name=name_prefix + '/activate_quant')
quant = control_flow_ops.cond(
activate_quant,
lambda: quant,
lambda: inputs,
name=name_prefix + '/delayed_quant')
if consumers:
tensors_modified_count = common.RerouteTensor(
quant, inputs, can_modify=consumers)
# Some operations can have multiple output tensors going to the same
# consumer. Since consumers is a set, we need to ensure that
# tensors_modified_count is greater than or equal to the length of the set
# of consumers.
if tensors_modified_count < len(consumers):
raise ValueError('No inputs quantized for ops: [%s]' % ', '.join(
[consumer.name for consumer in consumers]))
def _GetContextFromOp(op):
"""Gets the root context name from the op name."""
context_re = re.search(r'^(.*)/([^/]+)', op.name)
if context_re:
return context_re.group(1)
return ''
def _AddContextToName(context, name):
"""Adds the context to the name if it exists."""
if not context:
return name
return context + '/' + name
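
# Illustrative sketch of the two context helpers above; the op name used here
# is hypothetical and only shows how a scope prefix is derived and reused.
if __name__ == '__main__':
  class _FakeOp(object):
    name = 'InceptionV3/Conv2d_1a/Conv2D'

  _context = _GetContextFromOp(_FakeOp())  # -> 'InceptionV3/Conv2d_1a'
  print(_AddContextToName(_context, 'weights_quant'))
  # -> 'InceptionV3/Conv2d_1a/weights_quant'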
|
{
"content_hash": "711dac736f2294fba0b6f6fad69fca8c",
"timestamp": "",
"source": "github",
"line_count": 773,
"max_line_length": 80,
"avg_line_length": 38.10866752910737,
"alnum_prop": 0.6600923348496164,
"repo_name": "alshedivat/tensorflow",
"id": "92ca3f203954414159954f7f5d220f95b17967d0",
"size": "30147",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/quantize/python/quantize.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "439824"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50398044"
},
{
"name": "CMake",
"bytes": "199209"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1276639"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "871083"
},
{
"name": "Jupyter Notebook",
"bytes": "2604347"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "61311"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40038696"
},
{
"name": "RobotFramework",
"bytes": "890"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "486609"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
from django import forms
from django.forms import DateTimeField
from .models import Article
class ArticleForm(forms.ModelForm):
created_on = DateTimeField(widget=forms.widgets.DateTimeInput())
class Meta:
model = Article
fields = '__all__'
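
# A minimal sketch of the usual ModelForm flow for ArticleForm, assuming the
# caller passes a dict of POST data for the Article fields.
def create_article_from_post(post_data):
    """Validate the submitted data and save a new Article if it is valid."""
    form = ArticleForm(post_data)
    if form.is_valid():
        return form.save()
    return None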
|
{
"content_hash": "d54fd8cc7847804f6b55a86c396b26e8",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 68,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.7126865671641791,
"repo_name": "pfskiev/lisbon",
"id": "347c3c2aca72312a99e5366a20a1e97827170bc0",
"size": "268",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/news/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12697"
},
{
"name": "HTML",
"bytes": "79774"
},
{
"name": "Python",
"bytes": "79117"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import enum
from collections.abc import Sequence
__all__ = ['AddressPart', 'MatchType', 'SizeComparator', 'str_list', 'unquote']
class AddressPart(enum.Enum):
LOCALPART = enum.auto()
DOMAIN = enum.auto()
ALL = enum.auto()
@classmethod
def of(cls, flag: str | None) -> AddressPart:
if not flag:
return cls.ALL
elif flag == ':localpart':
return cls.LOCALPART
elif flag == ':domain':
return cls.DOMAIN
elif flag == ':all':
return cls.ALL
else:
raise NotImplementedError(flag)
class MatchType(enum.Enum):
IS = enum.auto()
CONTAINS = enum.auto()
MATCHES = enum.auto()
@classmethod
def of(cls, flag: str | None) -> MatchType:
if not flag:
return cls.IS
elif flag == ':is':
return cls.IS
elif flag == ':contains':
return cls.CONTAINS
elif flag == ':matches':
return cls.MATCHES
else:
raise NotImplementedError(flag)
class SizeComparator(enum.Enum):
OVER = enum.auto()
UNDER = enum.auto()
@classmethod
def of(cls, flag: str) -> SizeComparator:
if flag == ':over':
return cls.OVER
elif flag == ':under':
return cls.UNDER
else:
raise NotImplementedError(flag)
def str_list(value: str | Sequence[str]) -> Sequence[str]:
if isinstance(value, str):
return [unquote(value)]
else:
return [unquote(val) for val in value]
def unquote(value: str) -> str:
if value[0] == '"' and value[-1] == '"':
return value[1:-1]
else:
return value
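
# A minimal usage sketch of the helpers above: mapping sieve tagged arguments
# to enum members and normalizing quoted string lists.
if __name__ == '__main__':
    assert AddressPart.of(':domain') is AddressPart.DOMAIN
    assert MatchType.of(None) is MatchType.IS
    assert SizeComparator.of(':over') is SizeComparator.OVER
    assert str_list('"hello"') == ['hello']
    assert str_list(['"a"', 'b']) == ['a', 'b']
    assert unquote('"quoted"') == 'quoted'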
|
{
"content_hash": "c604fa26eb66699ec0bed47c0f55a29e",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 79,
"avg_line_length": 24.02777777777778,
"alnum_prop": 0.5531791907514451,
"repo_name": "icgood/pymap",
"id": "c69ef0d5e3001f75e8d5146b83c9ab176d9c894d",
"size": "1731",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pymap/sieve/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "390"
},
{
"name": "Lua",
"bytes": "15194"
},
{
"name": "Python",
"bytes": "857930"
},
{
"name": "Shell",
"bytes": "389"
}
],
"symlink_target": ""
}
|
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nnabla.ext_utils import get_extension_context, list_extensions
from nnabla.utils.inspection import NanInfTracer
from .models import simple_cnn
def _refresh_inputs_grad(f):
for i in f.inputs:
i.grad.zero()
@pytest.mark.parametrize("batch_size", [8])
@pytest.mark.parametrize("n_class", [5])
@pytest.mark.parametrize("ext_name", list_extensions())
@pytest.mark.parametrize("trace_nan", [False, True])
@pytest.mark.parametrize("trace_inf", [False, True])
def test_nan_inf_tracer(batch_size, n_class, ext_name, trace_nan, trace_inf):
nn.clear_parameters()
ctx = get_extension_context(ext_name)
nn.set_default_context(ctx)
x = nn.Variable.from_numpy_array(
np.random.normal(size=(batch_size, 3, 16, 16)))
t = nn.Variable.from_numpy_array(np.random.randint(low=0, high=n_class,
size=(batch_size, 1)))
y = simple_cnn(x, t, n_class)
must_be_inf = y / F.constant(0, shape=y.shape)
must_be_nan = must_be_inf / must_be_inf
# Refresh all arrays once so as to ensure all grad values are 0.
must_be_nan.visit(_refresh_inputs_grad)
nit = NanInfTracer(trace_nan=trace_nan, trace_inf=trace_inf)
    # Can be run in any case without raising an exception.
with nit.trace():
y.forward(clear_no_need_grad=True,
function_post_hook=nit.forward_post_hook)
y.backward(clear_buffer=True,
function_post_hook=nit.backward_post_hook)
nit.check() # this call can also work without exception.
# check nan
if trace_nan:
with pytest.raises(ValueError):
with nit.trace():
must_be_nan.forward(clear_buffer=True,
function_post_hook=nit.forward_post_hook)
with pytest.raises(ValueError):
with nit.trace():
must_be_nan.backward(clear_buffer=True,
function_post_hook=nit.backward_post_hook)
must_be_nan.forward(clear_buffer=True,
function_post_hook=nit.forward_post_hook)
with pytest.raises(ValueError):
nit.check()
must_be_nan.backward(clear_buffer=True,
function_post_hook=nit.backward_post_hook)
with pytest.raises(ValueError):
nit.check()
# check inf
if trace_inf:
with pytest.raises(ValueError):
with nit.trace():
must_be_inf.forward(clear_buffer=True,
function_post_hook=nit.forward_post_hook)
must_be_inf.forward(clear_buffer=True,
function_post_hook=nit.forward_post_hook)
with pytest.raises(ValueError):
nit.check()
|
{
"content_hash": "09736c0fe752d164c75c088237f5e046",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 33.348837209302324,
"alnum_prop": 0.599721059972106,
"repo_name": "sony/nnabla",
"id": "78efc986dcdca3af71bcea957d699e88a0962a0b",
"size": "3455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test/utils/inspection/test_nan_inf_tracer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "25938"
},
{
"name": "C++",
"bytes": "2590231"
},
{
"name": "CMake",
"bytes": "35358"
},
{
"name": "Cython",
"bytes": "180959"
},
{
"name": "Dockerfile",
"bytes": "5431"
},
{
"name": "Jupyter Notebook",
"bytes": "540006"
},
{
"name": "Makefile",
"bytes": "24294"
},
{
"name": "Python",
"bytes": "5311538"
},
{
"name": "Shell",
"bytes": "4750"
}
],
"symlink_target": ""
}
|
from collections import namedtuple
from scipy.optimize import newton
Flow = namedtuple('Flow', ['period','positive', 'negative'])
def netflow(flow):
return sum(flow.positive) - sum(flow.negative)
def present_value(flow, rate):
return netflow(flow)/((1 + rate)**flow.period)
def net_present_value(rate,flows):
return sum(present_value(flow, rate) for flow in flows)
def irr(flows):
return newton(net_present_value, 0.1, args=(flows,))
def payback(flows):
to_give = abs(sum(netflow(flow) for flow in flows[:-1]))/netflow(flows[-1])
return flows[-1].period - 1 + to_give
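
# A minimal usage sketch with made-up cash flows: one initial outlay followed
# by two inflows.
if __name__ == '__main__':
    flows = [
        Flow(period=0, positive=[0], negative=[1000]),
        Flow(period=1, positive=[600], negative=[0]),
        Flow(period=2, positive=[600], negative=[0]),
    ]
    print(net_present_value(0.1, flows))  # NPV at a 10% discount rate
    print(irr(flows))                     # discount rate at which the NPV is zero
    print(payback(flows))                 # periods needed to recover the outlay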
|
{
"content_hash": "dfba89eecb1ac7ea6e4b4ed3b57ff2bc",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 28.523809523809526,
"alnum_prop": 0.6928213689482471,
"repo_name": "mitra-varuna/supply_chain_network",
"id": "af61ead81954543506217f8b494ee925f2a0c2e4",
"size": "599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cash_flow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "1327"
}
],
"symlink_target": ""
}
|
import os
import sys
import zipfile
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
BASEURL = 'http://www.citygml.org/fileadmin/count.php?f=fileadmin/citygml/docs/'
FILES = [
'geoRES_testdata_v1.0.0',
'waldbruecke_v1.0.0',
'CityGML_2.0_Test_Dataset_2012-04-23',
'Berlin_Alexanderplatz_v0.4.0',
]
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
pwd = os.getcwd()
os.chdir(os.path.join(dname, 'datasets'))
for filename in FILES:
if not os.path.exists(filename) and not os.path.exists(filename + '.gml') and not os.path.exists(filename + '.xml'):
if not os.path.exists(filename + '.zip'):
print('Downloading {}'.format(filename))
if filename == 'CityGML_2.0_Test_Dataset_2012-04-23':
url = 'http://dl.dropbox.com/u/24313387/CityGML_2.0_Test_Dataset_2012-04-23.zip'
else:
url = BASEURL + filename + '.zip'
urlretrieve(url, filename + '.zip')
print('Extracting {}'.format(filename))
with zipfile.ZipFile(filename + '.zip', 'r') as zip:
zip.extractall('.')
os.chdir(pwd)
projdir = os.path.dirname(dname)
sys.path.insert(0, projdir)
|
{
"content_hash": "001e4e84ce251de972cb4a82f860b26f",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 120,
"avg_line_length": 31.897435897435898,
"alnum_prop": 0.6366559485530546,
"repo_name": "ctu-yfsg/2015-f-citygml",
"id": "4f594e021503f7d51b97d2acb7a2517f032960a2",
"size": "1244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26812"
}
],
"symlink_target": ""
}
|
import mock
from nova.tests.virt.xenapi import stubs
from nova import utils
from nova.virt.xenapi.client import objects
class XenAPISessionObjectTestCase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(XenAPISessionObjectTestCase, self).setUp()
self.session = mock.Mock()
self.obj = objects.XenAPISessionObject(self.session, "FAKE")
def test_call_method_via_attr(self):
self.session.call_xenapi.return_value = "asdf"
result = self.obj.get_X("ref")
self.assertEqual(result, "asdf")
self.session.call_xenapi.assert_called_once_with("FAKE.get_X", "ref")
class ObjectsTestCase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(ObjectsTestCase, self).setUp()
self.session = mock.Mock()
def test_VM(self):
vm = objects.VM(self.session)
vm.get_X("ref")
self.session.call_xenapi.assert_called_once_with("VM.get_X", "ref")
def test_SR(self):
sr = objects.SR(self.session)
sr.get_X("ref")
self.session.call_xenapi.assert_called_once_with("SR.get_X", "ref")
def test_VDI(self):
vdi = objects.VDI(self.session)
vdi.get_X("ref")
self.session.call_xenapi.assert_called_once_with("VDI.get_X", "ref")
def test_VBD(self):
vbd = objects.VBD(self.session)
vbd.get_X("ref")
self.session.call_xenapi.assert_called_once_with("VBD.get_X", "ref")
def test_PBD(self):
pbd = objects.PBD(self.session)
pbd.get_X("ref")
self.session.call_xenapi.assert_called_once_with("PBD.get_X", "ref")
def test_PIF(self):
pif = objects.PIF(self.session)
pif.get_X("ref")
self.session.call_xenapi.assert_called_once_with("PIF.get_X", "ref")
def test_VLAN(self):
vlan = objects.VLAN(self.session)
vlan.get_X("ref")
self.session.call_xenapi.assert_called_once_with("VLAN.get_X", "ref")
def test_host(self):
host = objects.Host(self.session)
host.get_X("ref")
self.session.call_xenapi.assert_called_once_with("host.get_X", "ref")
def test_network(self):
network = objects.Network(self.session)
network.get_X("ref")
self.session.call_xenapi.assert_called_once_with("network.get_X",
"ref")
def test_pool(self):
pool = objects.Pool(self.session)
pool.get_X("ref")
self.session.call_xenapi.assert_called_once_with("pool.get_X", "ref")
class VBDTestCase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(VBDTestCase, self).setUp()
self.session = mock.Mock()
self.session.VBD = objects.VBD(self.session)
def test_plug(self):
self.session.VBD.plug("vbd_ref", "vm_ref")
self.session.call_xenapi.assert_called_once_with("VBD.plug", "vbd_ref")
@mock.patch.object(utils, 'synchronized')
def test_vbd_plug_check_synchronized(self, mock_synchronized):
self.session.VBD.plug("vbd_ref", "vm_ref")
mock_synchronized.assert_called_once_with("xenapi-vbd-vm_ref")
def test_unplug(self):
self.session.VBD.unplug("vbd_ref", "vm_ref")
self.session.call_xenapi.assert_called_once_with("VBD.unplug",
"vbd_ref")
@mock.patch.object(utils, 'synchronized')
    def test_vbd_unplug_check_synchronized(self, mock_synchronized):
self.session.VBD.unplug("vbd_ref", "vm_ref")
mock_synchronized.assert_called_once_with("xenapi-vbd-vm_ref")
|
{
"content_hash": "2bc476838c960396f6f0ca78798b0117",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 79,
"avg_line_length": 34.70873786407767,
"alnum_prop": 0.6187412587412587,
"repo_name": "afrolov1/nova",
"id": "a4e5d35804d1245a6d1103e2fadd7b9b1acdc3dd",
"size": "4212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/virt/xenapi/client/test_objects.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14057622"
},
{
"name": "Shell",
"bytes": "17451"
}
],
"symlink_target": ""
}
|
import os
import sys
from warnings import warn
from pathlib import Path
import shutil
from astropy.table import Table  # needed for Table.read() below
import click
from .pypeit_make_1d_notebook import make_viz_notebook
inf = float('inf')
coadd1d_template = """
[coadd1d]
coaddfile = '{basename}.fits'
flux_value = False
ex_value = {ext_mode}
coadd1d read
{file_ext_pairs}
coadd1d end
"""
@click.command()
@click.argument('science_path', type=click.Path(exists=True, file_okay=False))
@click.option('-s', '--spatial', default=-1, help='negative means take all')
@click.option('-t', '--spatial-tolerance', default=3., help='in pixels')
@click.option('--slit', default=-1., help='negative means take all')
@click.option('--slit-tolerance', default=3., help='in pixels')
@click.option('-m', '--ext-mode', default='OPT',)
@click.option('-n', '--coadd-name', default='coadd1d',)
@click.option('--list', 'list_', is_flag=True)
@click.option('--coadd/--no-coadd', default=False)
@click.option('--notebook/--no-notebook', default=False)
def main(science_path, spatial, spatial_tolerance, slit, slit_tolerance,
ext_mode, coadd_name, list_, coadd, notebook):
"""
STUFF
"""
retcode = None
science_path = Path(science_path)
spec1dtxts = list(science_path.glob('spec1d*.txt'))
coadd1d_path = science_path / 'coadd1d'
fns = []
exts = []
rows = []
if spatial < 0:
spatial_bounds = [-inf, inf]
else:
spatial_bounds = [spatial - spatial_tolerance, spatial + spatial_tolerance]
if slit < 0:
slit_bounds = [-inf, inf]
else:
slit_bounds = [slit - slit_tolerance, slit + slit_tolerance]
for txtfn in spec1dtxts:
fitsfn = str(txtfn.absolute())[:-4] + '.fits'
rows.append(fitsfn)
for row in Table.read(txtfn, format='ascii.fixed_width'):
if ((spatial_bounds[0] < row['spat_pixpos'] < spatial_bounds[1]) and
(slit_bounds[0] < row['slit'] < slit_bounds[1])):
fns.append(fitsfn)
exts.append(row['name'])
if len(rows) < 2:
rows.append(str(row))
else:
rows.append(str(row).split('\n')[-1])
print('Matched', len(exts), 'traces from', len(spec1dtxts), 'spec1d files')
file_ext_pairs = '\n'.join([f' {fn} {ext}' for fn, ext in zip(fns, exts)])
if list_:
print('\n'.join(rows))
else:
if coadd1d_path.exists():
warn(f'Coadd directory {coadd1d_path} already exists...')
else:
coadd1d_path.mkdir()
basename = str(coadd1d_path / coadd_name)
outfn = basename + '.coadd'
towrite = coadd1d_template.format(**locals())
print('Writing file', outfn)
with open(outfn, 'w') as f:
f.write(towrite)
if coadd:
parfn = basename + '.par'
# when https://github.com/pypeit/PypeIt/pull/1138 is in
#sys.exit(os.system(f'pypeit_coadd_1dspec --par_outfile {parfn} {outfn} '))
retcode = os.system(f'pypeit_coadd_1dspec {outfn} ')
if retcode == 0:
wrong_parfn = Path('coadd1d.par')
wrong_parfn.replace(parfn)
if notebook:
make_viz_notebook(coadd1d_path, baseout='view_spec1d_coadd', prefix='')
if retcode is None:
return 0
else:
return retcode
if __name__ == '__main__':
main()
|
{
"content_hash": "83900d649057a7c698886994e506bb3b",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 87,
"avg_line_length": 31.165137614678898,
"alnum_prop": 0.5825728584044746,
"repo_name": "eteq/erikutils",
"id": "b3d833da2b8c2b1d06488156504b7d932d16938e",
"size": "3397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "erikutils/pypeit_helpers/pypeit_make_1d_coadd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "87716"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ContactMessage'
db.create_table(u'pages_contactmessage', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('status', self.gf('django.db.models.fields.CharField')(default='new', max_length=20)),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.BlueBottleUser'], null=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=200)),
('message', self.gf('django.db.models.fields.TextField')()),
('creation_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modification_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
        db.send_create_signal(u'pages', ['ContactMessage'])

    def backwards(self, orm):
        # Deleting model 'ContactMessage'
        db.delete_table(u'pages_contactmessage')
models = {
u'accounts.bluebottleuser': {
'Meta': {'object_name': 'BlueBottleUser'},
'about': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'}),
'availability': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'available_time': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'birthdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'newsletter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'picture': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'primary_language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'share_money': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'share_time_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'user_type': ('django.db.models.fields.CharField', [], {'default': "'person'", 'max_length': '25'}),
'username': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'why': ('django.db.models.fields.TextField', [], {'max_length': '265', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'fluent_contents.placeholder': {
'Meta': {'unique_together': "(('parent_type', 'parent_id', 'slot'),)", 'object_name': 'Placeholder'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'m'", 'max_length': '1'}),
'slot': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'pages.contactmessage': {
            'Meta': {'object_name': 'ContactMessage'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.BlueBottleUser']", 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20'}),
'message': ('django.db.models.fields.TextField', [], {})
},
u'pages.page': {
'Meta': {'ordering': "('language', 'slug')", 'unique_together': "(('language', 'slug'),)", 'object_name': 'Page'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.BlueBottleUser']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'modification_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'draft'", 'max_length': '20', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['pages']
|
{
"content_hash": "a0d69921b77ea3d1fceff4777d868340",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 187,
"avg_line_length": 78.27272727272727,
"alnum_prop": 0.5629113433991483,
"repo_name": "gannetson/sportschooldeopenlucht",
"id": "70201461593978be58f05dcd5b37d17411237798",
"size": "10356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/pages/migrations/0002_auto__add_contactmessage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "828459"
},
{
"name": "DOT",
"bytes": "4627"
},
{
"name": "JavaScript",
"bytes": "325860"
},
{
"name": "Python",
"bytes": "8019493"
},
{
"name": "Ruby",
"bytes": "955"
},
{
"name": "Shell",
"bytes": "1099524"
}
],
"symlink_target": ""
}
|
"""
tests.__init__
~~~~~~~~~~~~~~
Tests initialization.
"""
import betamax
from homeassistant import util
from homeassistant.util import location
with betamax.Betamax.configure() as config:
config.cassette_library_dir = 'tests/cassettes'
# Automatically called during different setups; too often forgotten,
# so mocked by default.
location.detect_location_info = lambda: location.LocationInfo(
ip='1.1.1.1',
country_code='US',
country_name='United States',
region_code='CA',
region_name='California',
city='San Diego',
zip_code='92122',
time_zone='America/Los_Angeles',
latitude='2.0',
longitude='1.0',
use_fahrenheit=True,
)
location.elevation = lambda latitude, longitude: 0
util.get_local_ip = lambda: '127.0.0.1'
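# Illustrative note (added; not part of the original module): with the Betamax
# configuration above, an individual test would typically wrap a
# `requests.Session` and replay a recorded cassette from tests/cassettes,
# e.g. (hypothetical cassette name):
#
#     import requests
#     session = requests.Session()
#     with betamax.Betamax(session).use_cassette('example'):
#         response = session.get('http://example.com')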
|
{
"content_hash": "8a0767cce9936ab9926dc2972d5e9ab3",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 67,
"avg_line_length": 24.03125,
"alnum_prop": 0.6892067620286085,
"repo_name": "coteyr/home-assistant",
"id": "1f18116b24baf7b896b1647c509e82d76f4e40e0",
"size": "769",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1481041"
},
{
"name": "Python",
"bytes": "1876270"
},
{
"name": "Shell",
"bytes": "3570"
}
],
"symlink_target": ""
}
|
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.model_selection import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also make a hastie_10_2 dataset
hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
hastie_X = hastie_X.astype(np.float32)
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
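# Note (added comment, not in the original file): the dictionaries above act
# as a registry so each check_* helper below can be run against every forest
# variant via nose-style yield tests; e.g. `yield check_classification_toy,
# name` expands into one generated test per estimator name.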
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion,
random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.94, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", "mae", "friedman_mse")):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, criterion, X, y):
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=20, criterion=criterion,
random_state=0)
est.fit(X, y)
importances = est.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
# Check with parallel
importances = est.feature_importances_
est.set_params(n_jobs=2)
    importances_parallel = est.feature_importances_
    assert_array_almost_equal(importances, importances_parallel)
# Check with sample weights
sample_weight = check_random_state(0).randint(1, 10, len(X))
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=sample_weight)
importances = est.feature_importances_
assert_true(np.all(importances >= 0.0))
for scale in [0.5, 10, 100]:
est = ForestEstimator(n_estimators=20, random_state=0, criterion=criterion)
est.fit(X, y, sample_weight=scale * sample_weight)
importances_bis = est.feature_importances_
assert_less(np.abs(importances - importances_bis).mean(), 0.001)
@skip_if_32bit
def test_importances():
X, y = datasets.make_classification(n_samples=500, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name, criterion in product(FOREST_CLASSIFIERS, ["gini", "entropy"]):
yield check_importances, name, criterion, X, y
for name, criterion in product(FOREST_REGRESSORS, ["mse", "friedman_mse", "mae"]):
yield check_importances, name, criterion, X, y
def test_importances_asymptotic():
# Check whether variable importances of totally randomized trees
# converge towards their theoretical values (See Louppe et al,
# Understanding variable importances in forests of randomized trees, 2013).
def binomial(k, n):
return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
def entropy(samples):
n_samples = len(samples)
entropy = 0.
for count in np.bincount(samples):
p = 1. * count / n_samples
if p > 0:
entropy -= p * np.log2(p)
return entropy
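    # (Added note: e.g. entropy([0, 0, 1, 1]) == 1.0 and
    # entropy([0, 0, 0, 0]) == 0.0, since log base 2 is used.)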
def mdi_importance(X_m, X, y):
n_samples, n_features = X.shape
features = list(range(n_features))
features.pop(X_m)
values = [np.unique(X[:, i]) for i in range(n_features)]
imp = 0.
for k in range(n_features):
# Weight of each B of size k
coef = 1. / (binomial(k, n_features) * (n_features - k))
# For all B of size k
for B in combinations(features, k):
# For all values B=b
for b in product(*[values[B[j]] for j in range(k)]):
mask_b = np.ones(n_samples, dtype=np.bool)
for j in range(k):
mask_b &= X[:, B[j]] == b[j]
X_, y_ = X[mask_b, :], y[mask_b]
n_samples_b = len(X_)
if n_samples_b > 0:
children = []
for xi in values[X_m]:
mask_xi = X_[:, X_m] == xi
children.append(y_[mask_xi])
imp += (coef
* (1. * n_samples_b / n_samples) # P(B=b)
* (entropy(y_) -
sum([entropy(c) * len(c) / n_samples_b
for c in children])))
return imp
data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 2],
[1, 0, 1, 1, 0, 1, 1, 3],
[0, 1, 1, 1, 0, 1, 0, 4],
[1, 1, 0, 1, 0, 1, 1, 5],
[1, 1, 0, 1, 1, 1, 1, 6],
[1, 0, 1, 0, 0, 1, 0, 7],
[1, 1, 1, 1, 1, 1, 1, 8],
[1, 1, 1, 1, 0, 1, 1, 9],
[1, 1, 1, 0, 1, 1, 1, 0]])
X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
n_features = X.shape[1]
# Compute true importances
true_importances = np.zeros(n_features)
for i in range(n_features):
true_importances[i] = mdi_importance(i, X, y)
# Estimate importances with totally randomized trees
clf = ExtraTreesClassifier(n_estimators=500,
max_features=1,
criterion="entropy",
random_state=0).fit(X, y)
importances = sum(tree.tree_.compute_feature_importances(normalize=False)
for tree in clf.estimators_) / clf.n_estimators
# Check correctness
assert_almost_equal(entropy(y), sum(importances))
assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
    # Check that the OOB prediction is a good estimate of the generalization
    # error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
    # Check picklability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
    # Create the RTE with sparse_output=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_random_hasher():
    # Test random forest hashing on the circles dataset;
    # make sure that it is linearly separable
    # even after being projected onto two SVD dimensions.
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The most compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = np.random.randint(0, 2, 1000)
X[:, 1] = np.random.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name):
X, y = hastie_X, hastie_y
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1, random_state=0).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1,
random_state=0).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name
def check_min_samples_split(name):
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_split=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=0).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_split=1.1).fit, X, y)
est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
est.fit(X, y)
node_idx = est.estimators_[0].tree_.children_left != -1
node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
assert_greater(np.min(node_samples), len(X) * 0.5 - 1,
"Failed with {0}".format(name))
def test_min_samples_split():
for name in FOREST_ESTIMATORS:
yield check_min_samples_split, name
def check_min_samples_leaf(name):
X, y = hastie_X, hastie_y
    # Test that leaves contain at least min_samples_leaf training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test boundary value
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError,
ForestEstimator(min_samples_leaf=0).fit, X, y)
est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), len(X) * 0.25 - 1,
"Failed with {0}".format(name))
def test_min_samples_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name
def check_min_weight_fraction_leaf(name):
X, y = hastie_X, hastie_y
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac, n_estimators=1,
random_state=0)
if "RandomForest" in name:
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(random_state=0,
n_samples=50)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(n_estimators=1,
random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
X = iris.data[:, 0]
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
    # Check that class_weight resembles sample_weight behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Using a Python 2.x list as the sample_weight parameter used to raise
# an exception. This test makes sure such code will now run correctly.
clf = ForestClassifier()
sample_weight = [1.] * len(iris.data)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test that class_weight works for multi-output.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='balanced', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
    # Test that warm start with equal n_estimators does nothing, returns the
    # same forest, and raises a warning.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = hastie_X, hastie_y
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
    assert_false(hasattr(clf_3, 'oob_score_'))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert(n_classes=15):
classifier = RandomForestClassifier(random_state=0, bootstrap=False)
X = np.eye(n_classes)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:n_classes]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(classifier.classes_, y)
assert_array_equal(result, y)
def check_decision_path(name):
X, y = hastie_X, hastie_y
n_samples = X.shape[0]
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
est.fit(X, y)
indicator, n_nodes_ptr = est.decision_path(X)
assert_equal(indicator.shape[1], n_nodes_ptr[-1])
assert_equal(indicator.shape[0], n_samples)
assert_array_equal(np.diff(n_nodes_ptr),
[e.tree_.node_count for e in est.estimators_])
# Assert that leaves index are correct
leaves = est.apply(X)
for est_id in range(leaves.shape[1]):
leave_indicator = [indicator[i, n_nodes_ptr[est_id] + j]
for i, j in enumerate(leaves[:, est_id])]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
def test_decision_path():
for name in FOREST_CLASSIFIERS:
yield check_decision_path, name
for name in FOREST_REGRESSORS:
yield check_decision_path, name
def test_min_impurity_split():
# Test if min_impurity_split of base estimators is set
# Regression test for #8006
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [RandomForestClassifier, RandomForestRegressor,
ExtraTreesClassifier, ExtraTreesRegressor]
for Estimator in all_estimators:
est = Estimator(min_impurity_split=0.1)
est = assert_warns_message(DeprecationWarning, "min_impurity_decrease",
est.fit, X, y)
for tree in est.estimators_:
assert_equal(tree.min_impurity_split, 0.1)
def test_min_impurity_decrease():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [RandomForestClassifier, RandomForestRegressor,
ExtraTreesClassifier, ExtraTreesRegressor]
for Estimator in all_estimators:
est = Estimator(min_impurity_decrease=0.1)
est.fit(X, y)
for tree in est.estimators_:
# Simply check if the parameter is passed on correctly. Tree tests
# will suffice for the actual working of this param
assert_equal(tree.min_impurity_decrease, 0.1)
|
{
"content_hash": "bdb4c7c0f91cd065bc3de29fe3508edc",
"timestamp": "",
"source": "github",
"line_count": 1211,
"max_line_length": 86,
"avg_line_length": 35.49215524360033,
"alnum_prop": 0.6254158814359834,
"repo_name": "ldirer/scikit-learn",
"id": "b964ed768d85e2e1570e39c44f9e415d6b859457",
"size": "42981",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sklearn/ensemble/tests/test_forest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451996"
},
{
"name": "C++",
"bytes": "140322"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7013349"
},
{
"name": "Shell",
"bytes": "19532"
}
],
"symlink_target": ""
}
|
import urllib2
import json
def google_search(search_term):
"""
    Searches for a `search_term`, which should be a string or a value convertible to a string.
Parameters:
- str `search_term`: a string to search for
Returns a tuple (on success):
      - first value is a list of search results for the `search_term` returned by the Google API
- second value is a Google Search UI URL, where more results can be obtained
Returns False (on failure).
--
Authors:
- michaelpri10
- Jacob-Gray
- Kubo2
"""
# The request also includes the userip parameter which provides the end
# user's IP address. Doing so will help distinguish this legitimate
# server-side traffic from traffic which doesn't come from an end-user.
search_term = search_term.encode('ascii', errors='replace')
url = "https://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=%s&userip=USERS-IP-ADDRESS" % search_term
request = urllib2.Request(url, None)
response = urllib2.urlopen(request)
# Process the JSON string.
results = json.load(response)
# now have some fun with the results...
if len(results["responseData"]["results"]) > 0:
return results["responseData"]["results"], results["responseData"]["cursor"]["moreResultsUrl"]
return False
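# Hypothetical usage sketch (added for illustration; not part of the original
# module). The result keys used below ("titleNoFormatting", "unescapedUrl")
# follow the old Google AJAX Search API response format, which is deprecated
# and may no longer be served.
if __name__ == "__main__":
    found = google_search("python")
    if found:
        hits, more_url = found
        for hit in hits:
            print hit.get("titleNoFormatting"), hit.get("unescapedUrl")
        print "More results at:", more_url
    else:
        print "No results."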
|
{
"content_hash": "2612d6ca85e23d2af1c2717060bc465d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 113,
"avg_line_length": 32.58536585365854,
"alnum_prop": 0.6714071856287425,
"repo_name": "Jacob-Gray/WelcomeBot",
"id": "803a02152a4486c669137f2b0c059f78176cf82e",
"size": "1336",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "google_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3212"
}
],
"symlink_target": ""
}
|
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
from cloudplaya import VERSION
PACKAGE_NAME = 'cloudplaya'
setup(name=PACKAGE_NAME,
version=VERSION,
license='MIT',
description='Python module and command line client for '
'Amazon Cloud Player',
entry_points={
'console_scripts': [
'cloudplaya = cloudplaya.main:main',
],
},
packages=find_packages(),
install_requires=[
'mechanize>=0.2.5',
'requests>=0.13.2',
],
maintainer='Christian Hammond',
maintainer_email='chipx86@chipx86.com',
url='http://github.com/chipx86/cloudplaya/',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development',
]
)
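# Installation sketch (added comment, not part of the original file): with
# this setup.py, running `pip install .` installs the package and exposes the
# `cloudplaya` console script declared in entry_points.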
|
{
"content_hash": "bbf803f5a2fec0a96a9ff9bfe3eade61",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 62,
"avg_line_length": 27.926829268292682,
"alnum_prop": 0.5912663755458515,
"repo_name": "chipx86/cloudplaya",
"id": "cc06aebd90c0dbdc32036d7745d684b021126680",
"size": "1168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37501"
}
],
"symlink_target": ""
}
|
"""Stacking classifier and regressor."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from copy import deepcopy
from numbers import Integral
import numpy as np
from joblib import Parallel
import scipy.sparse as sparse
from ..base import clone
from ..base import ClassifierMixin, RegressorMixin, TransformerMixin
from ..base import is_classifier, is_regressor
from ..exceptions import NotFittedError
from ..utils._estimator_html_repr import _VisualBlock
from ._base import _fit_single_estimator
from ._base import _BaseHeterogeneousEnsemble
from ..linear_model import LogisticRegression
from ..linear_model import RidgeCV
from ..model_selection import cross_val_predict
from ..model_selection import check_cv
from ..preprocessing import LabelEncoder
from ..utils import Bunch
from ..utils.multiclass import check_classification_targets, type_of_target
from ..utils.metaestimators import available_if
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils.fixes import delayed
from ..utils._param_validation import HasMethods, StrOptions
from ..utils.validation import _check_feature_names_in
def _estimator_has(attr):
"""Check if we can delegate a method to the underlying estimator.
First, we check the first fitted final estimator if available, otherwise we
check the unfitted final estimator.
"""
return lambda self: (
hasattr(self.final_estimator_, attr)
if hasattr(self, "final_estimator_")
else hasattr(self.final_estimator, attr)
)
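# (Added comment, not in the original source: the lambda returned here is
# consumed by `available_if`, so e.g. `predict` is only exposed on a stacker
# whose final estimator implements it.)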
class _BaseStacking(TransformerMixin, _BaseHeterogeneousEnsemble, metaclass=ABCMeta):
"""Base class for stacking method."""
_parameter_constraints: dict = {
"estimators": [list],
"final_estimator": [None, HasMethods("fit")],
"cv": ["cv_object", StrOptions({"prefit"})],
"n_jobs": [None, Integral],
"passthrough": ["boolean"],
"verbose": ["verbose"],
}
@abstractmethod
def __init__(
self,
estimators,
final_estimator=None,
*,
cv=None,
stack_method="auto",
n_jobs=None,
verbose=0,
passthrough=False,
):
super().__init__(estimators=estimators)
self.final_estimator = final_estimator
self.cv = cv
self.stack_method = stack_method
self.n_jobs = n_jobs
self.verbose = verbose
self.passthrough = passthrough
def _clone_final_estimator(self, default):
if self.final_estimator is not None:
self.final_estimator_ = clone(self.final_estimator)
else:
self.final_estimator_ = clone(default)
def _concatenate_predictions(self, X, predictions):
"""Concatenate the predictions of each first layer learner and
possibly the input dataset `X`.
If `X` is sparse and `self.passthrough` is False, the output of
`transform` will be dense (the predictions). If `X` is sparse
and `self.passthrough` is True, the output of `transform` will
be sparse.
This helper is in charge of ensuring the predictions are 2D arrays and
        it will drop one of the probability columns when using probabilities
        in the binary case, since p(y|c=0) = 1 - p(y|c=1).
        When the type of `y` is `"multilabel-indicator"` and the method used is
        `predict_proba`, `preds` can be either an `ndarray` of shape
        `(n_samples, n_classes)` or, for some estimators, a list of `ndarray`.
        This function will drop one of the probability columns in this situation as well.
"""
X_meta = []
for est_idx, preds in enumerate(predictions):
if isinstance(preds, list):
# `preds` is here a list of `n_targets` 2D ndarrays of
# `n_classes` columns. The k-th column contains the
                # probabilities of the samples belonging to the k-th class.
#
# Since those probabilities must sum to one for each sample,
# we can work with probabilities of `n_classes - 1` classes.
# Hence we drop the first column.
for pred in preds:
X_meta.append(pred[:, 1:])
elif preds.ndim == 1:
                # Some estimators return a 1D array for predictions,
                # which must be converted to 2-dimensional arrays.
X_meta.append(preds.reshape(-1, 1))
elif (
self.stack_method_[est_idx] == "predict_proba"
and len(self.classes_) == 2
):
                # Remove the first column when using probabilities in
                # binary classification because the two probability
                # columns are perfectly collinear.
X_meta.append(preds[:, 1:])
else:
X_meta.append(preds)
self._n_feature_outs = [pred.shape[1] for pred in X_meta]
if self.passthrough:
X_meta.append(X)
if sparse.issparse(X):
return sparse.hstack(X_meta, format=X.format)
return np.hstack(X_meta)
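    # Illustrative note (added comment, not in the original source): for a
    # binary problem with two base estimators whose `predict_proba` outputs
    # have shape (n_samples, 2), only the second column of each is kept, so
    # `X_meta` has shape (n_samples, 2), or (n_samples, 2 + n_features) when
    # `passthrough=True`.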
@staticmethod
def _method_name(name, estimator, method):
if estimator == "drop":
return None
if method == "auto":
if getattr(estimator, "predict_proba", None):
return "predict_proba"
elif getattr(estimator, "decision_function", None):
return "decision_function"
else:
return "predict"
else:
if not hasattr(estimator, method):
raise ValueError(
"Underlying estimator {} does not implement the method {}.".format(
name, method
)
)
return method
def fit(self, X, y, sample_weight=None):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
        sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if all underlying estimators
support sample weights.
.. versionchanged:: 0.23
when not None, `sample_weight` is passed to all underlying
estimators
Returns
-------
self : object
"""
self._validate_params()
        # all_estimators contains all estimators, both the ones to be fitted
        # and the 'drop' placeholders.
names, all_estimators = self._validate_estimators()
self._validate_final_estimator()
stack_method = [self.stack_method] * len(all_estimators)
if self.cv == "prefit":
self.estimators_ = []
for estimator in all_estimators:
if estimator != "drop":
check_is_fitted(estimator)
self.estimators_.append(estimator)
else:
# Fit the base estimators on the whole training data. Those
# base estimators will be used in transform, predict, and
# predict_proba. They are exposed publicly.
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_single_estimator)(clone(est), X, y, sample_weight)
for est in all_estimators
if est != "drop"
)
self.named_estimators_ = Bunch()
est_fitted_idx = 0
for name_est, org_est in zip(names, all_estimators):
if org_est != "drop":
current_estimator = self.estimators_[est_fitted_idx]
self.named_estimators_[name_est] = current_estimator
est_fitted_idx += 1
if hasattr(current_estimator, "feature_names_in_"):
self.feature_names_in_ = current_estimator.feature_names_in_
else:
self.named_estimators_[name_est] = "drop"
self.stack_method_ = [
self._method_name(name, est, meth)
for name, est, meth in zip(names, all_estimators, stack_method)
]
if self.cv == "prefit":
# Generate predictions from prefit models
predictions = [
getattr(estimator, predict_method)(X)
for estimator, predict_method in zip(all_estimators, self.stack_method_)
if estimator != "drop"
]
else:
            # To train the meta-classifier using as much data as possible, we
            # use cross-validation to obtain the outputs of the stacked
            # estimators. To ensure that the data provided to each estimator
            # is the same, we need to set the random state of the cv if there
            # is one, and we need to take a copy.
cv = check_cv(self.cv, y=y, classifier=is_classifier(self))
if hasattr(cv, "random_state") and cv.random_state is None:
cv.random_state = np.random.RandomState()
fit_params = (
{"sample_weight": sample_weight} if sample_weight is not None else None
)
predictions = Parallel(n_jobs=self.n_jobs)(
delayed(cross_val_predict)(
clone(est),
X,
y,
cv=deepcopy(cv),
method=meth,
n_jobs=self.n_jobs,
fit_params=fit_params,
verbose=self.verbose,
)
for est, meth in zip(all_estimators, self.stack_method_)
if est != "drop"
)
        # Only estimators that are not 'drop' will be used in transform;
        # remove the corresponding entries from the method list as well.
self.stack_method_ = [
meth
for (meth, est) in zip(self.stack_method_, all_estimators)
if est != "drop"
]
X_meta = self._concatenate_predictions(X, predictions)
_fit_single_estimator(
self.final_estimator_, X_meta, y, sample_weight=sample_weight
)
return self
@property
def n_features_in_(self):
"""Number of features seen during :term:`fit`."""
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
f"{self.__class__.__name__} object has no attribute n_features_in_"
) from nfe
return self.estimators_[0].n_features_in_
def _transform(self, X):
"""Concatenate and return the predictions of the estimators."""
check_is_fitted(self)
predictions = [
getattr(est, meth)(X)
for est, meth in zip(self.estimators_, self.stack_method_)
if est != "drop"
]
return self._concatenate_predictions(X, predictions)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Input features. The input feature names are only used when `passthrough` is
`True`.
- If `input_features` is `None`, then `feature_names_in_` is
used as feature names in. If `feature_names_in_` is not defined,
then names are generated: `[x0, x1, ..., x(n_features_in_ - 1)]`.
- If `input_features` is an array-like, then `input_features` must
match `feature_names_in_` if `feature_names_in_` is defined.
If `passthrough` is `False`, then only the names of `estimators` are used
to generate the output feature names.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
input_features = _check_feature_names_in(
self, input_features, generate_names=self.passthrough
)
class_name = self.__class__.__name__.lower()
non_dropped_estimators = (
name for name, est in self.estimators if est != "drop"
)
meta_names = []
for est, n_features_out in zip(non_dropped_estimators, self._n_feature_outs):
if n_features_out == 1:
meta_names.append(f"{class_name}_{est}")
else:
meta_names.extend(
f"{class_name}_{est}{i}" for i in range(n_features_out)
)
if self.passthrough:
return np.concatenate((meta_names, input_features))
return np.asarray(meta_names, dtype=object)
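    # (Added comment: e.g. a StackingClassifier with estimators
    # [('lr', ...), ('svc', ...)] on a 3-class problem using `predict_proba`
    # would yield names like 'stackingclassifier_lr0', ...,
    # 'stackingclassifier_svc2'.)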
@available_if(_estimator_has("predict"))
def predict(self, X, **predict_params):
"""Predict target for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
**predict_params : dict of str -> obj
Parameters to the `predict` called by the `final_estimator`. Note
that this may be used to return uncertainties from some estimators
            with `return_std` or `return_cov`. Be aware that it only
            accounts for uncertainty in the final estimator.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_output)
Predicted targets.
"""
check_is_fitted(self)
return self.final_estimator_.predict(self.transform(X), **predict_params)
def _sk_visual_block_(self, final_estimator):
names, estimators = zip(*self.estimators)
parallel = _VisualBlock("parallel", estimators, names=names, dash_wrapped=False)
# final estimator is wrapped in a parallel block to show the label:
# 'final_estimator' in the html repr
final_block = _VisualBlock(
"parallel", [final_estimator], names=["final_estimator"], dash_wrapped=False
)
return _VisualBlock("serial", (parallel, final_block), dash_wrapped=False)
class StackingClassifier(ClassifierMixin, _BaseStacking):
"""Stack of estimators with a final classifier.
    Stacked generalization consists of stacking the outputs of individual
    estimators and using a classifier to compute the final prediction. Stacking
    leverages the strength of each individual estimator by using their
    output as the input of a final estimator.
Note that `estimators_` are fitted on the full `X` while `final_estimator_`
is trained using cross-validated predictions of the base estimators using
`cross_val_predict`.
Read more in the :ref:`User Guide <stacking>`.
.. versionadded:: 0.22
Parameters
----------
estimators : list of (str, estimator)
Base estimators which will be stacked together. Each element of the
list is defined as a tuple of string (i.e. name) and an estimator
instance. An estimator can be set to 'drop' using `set_params`.
final_estimator : estimator, default=None
A classifier which will be used to combine the base estimators.
The default classifier is a
:class:`~sklearn.linear_model.LogisticRegression`.
cv : int, cross-validation generator, iterable, or "prefit", default=None
Determines the cross-validation splitting strategy used in
`cross_val_predict` to train `final_estimator`. Possible inputs for
cv are:
* None, to use the default 5-fold cross validation,
* integer, to specify the number of folds in a (Stratified) KFold,
* An object to be used as a cross-validation generator,
* An iterable yielding train, test splits,
* `"prefit"` to assume the `estimators` are prefit. In this case, the
estimators will not be refitted.
For integer/None inputs, if the estimator is a classifier and y is
either binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used.
In all other cases, :class:`~sklearn.model_selection.KFold` is used.
These splitters are instantiated with `shuffle=False` so the splits
will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
If "prefit" is passed, it is assumed that all `estimators` have
been fitted already. The `final_estimator_` is trained on the `estimators`
predictions on the full training set and are **not** cross validated
predictions. Please note that if the models have been trained on the same
data to train the stacking model, there is a very high risk of overfitting.
.. versionadded:: 1.1
The 'prefit' option was added in 1.1
        .. note::
           A larger number of splits will provide no benefit if the number
           of training samples is large enough; it will only increase the
           training time. ``cv`` is not used for model evaluation but for
           prediction.
stack_method : {'auto', 'predict_proba', 'decision_function', 'predict'}, \
default='auto'
Methods called for each base estimator. It can be:
* if 'auto', it will try to invoke, for each estimator,
`'predict_proba'`, `'decision_function'` or `'predict'` in that
order.
* otherwise, one of `'predict_proba'`, `'decision_function'` or
`'predict'`. If the method is not implemented by the estimator, it
will raise an error.
n_jobs : int, default=None
        The number of jobs to run in parallel for `fit` of all `estimators`.
`None` means 1 unless in a `joblib.parallel_backend` context. -1 means
using all processors. See Glossary for more details.
passthrough : bool, default=False
When False, only the predictions of estimators will be used as
training data for `final_estimator`. When True, the
`final_estimator` is trained on the predictions as well as the
original training data.
verbose : int, default=0
Verbosity level.
Attributes
----------
classes_ : ndarray of shape (n_classes,) or list of ndarray if `y` \
is of type `"multilabel-indicator"`.
Class labels.
estimators_ : list of estimators
The elements of the `estimators` parameter, having been fitted on the
training data. If an estimator has been set to `'drop'`, it
will not appear in `estimators_`. When `cv="prefit"`, `estimators_`
is set to `estimators` and is not fitted again.
named_estimators_ : :class:`~sklearn.utils.Bunch`
Attribute to access any fitted sub-estimators by name.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying classifier exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimators expose such an attribute when fit.
.. versionadded:: 1.0
final_estimator_ : estimator
The classifier which predicts given the output of `estimators_`.
stack_method_ : list of str
The method used by each base estimator.
See Also
--------
StackingRegressor : Stack of estimators with a final regressor.
Notes
-----
    When `predict_proba` is used by each estimator (i.e. most of the time for
    `stack_method='auto'` or specifically for `stack_method='predict_proba'`),
    the first column predicted by each estimator will be dropped in the case
    of a binary classification problem, since the two columns would be
    perfectly collinear (the sketch after this class illustrates this).
References
----------
.. [1] Wolpert, David H. "Stacked generalization." Neural networks 5.2
(1992): 241-259.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sklearn.svm import LinearSVC
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.pipeline import make_pipeline
>>> from sklearn.ensemble import StackingClassifier
>>> X, y = load_iris(return_X_y=True)
>>> estimators = [
... ('rf', RandomForestClassifier(n_estimators=10, random_state=42)),
... ('svr', make_pipeline(StandardScaler(),
... LinearSVC(random_state=42)))
... ]
>>> clf = StackingClassifier(
... estimators=estimators, final_estimator=LogisticRegression()
... )
>>> from sklearn.model_selection import train_test_split
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, stratify=y, random_state=42
... )
>>> clf.fit(X_train, y_train).score(X_test, y_test)
0.9...
"""
_parameter_constraints: dict = {
**_BaseStacking._parameter_constraints,
"stack_method": [
StrOptions({"auto", "predict_proba", "decision_function", "predict"})
],
}
def __init__(
self,
estimators,
final_estimator=None,
*,
cv=None,
stack_method="auto",
n_jobs=None,
passthrough=False,
verbose=0,
):
super().__init__(
estimators=estimators,
final_estimator=final_estimator,
cv=cv,
stack_method=stack_method,
n_jobs=n_jobs,
passthrough=passthrough,
verbose=verbose,
)
def _validate_final_estimator(self):
self._clone_final_estimator(default=LogisticRegression())
if not is_classifier(self.final_estimator_):
raise ValueError(
"'final_estimator' parameter should be a classifier. Got {}".format(
self.final_estimator_
)
)
def fit(self, X, y, sample_weight=None):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if all underlying estimators
support sample weights.
Returns
-------
self : object
Returns a fitted instance of estimator.
"""
check_classification_targets(y)
if type_of_target(y) == "multilabel-indicator":
self._label_encoder = [LabelEncoder().fit(yk) for yk in y.T]
self.classes_ = [le.classes_ for le in self._label_encoder]
y_encoded = np.array(
[
self._label_encoder[target_idx].transform(target)
for target_idx, target in enumerate(y.T)
]
).T
else:
self._label_encoder = LabelEncoder().fit(y)
self.classes_ = self._label_encoder.classes_
y_encoded = self._label_encoder.transform(y)
return super().fit(X, y_encoded, sample_weight)
@available_if(_estimator_has("predict"))
def predict(self, X, **predict_params):
"""Predict target for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
**predict_params : dict of str -> obj
Parameters to the `predict` called by the `final_estimator`. Note
that this may be used to return uncertainties from some estimators
            with `return_std` or `return_cov`. Be aware that it will only
            account for uncertainty in the final estimator.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_output)
Predicted targets.
"""
y_pred = super().predict(X, **predict_params)
if isinstance(self._label_encoder, list):
# Handle the multilabel-indicator case
y_pred = np.array(
[
self._label_encoder[target_idx].inverse_transform(target)
for target_idx, target in enumerate(y_pred.T)
]
).T
else:
y_pred = self._label_encoder.inverse_transform(y_pred)
return y_pred
@available_if(_estimator_has("predict_proba"))
def predict_proba(self, X):
"""Predict class probabilities for `X` using the final estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
probabilities : ndarray of shape (n_samples, n_classes) or \
list of ndarray of shape (n_output,)
The class probabilities of the input samples.
"""
check_is_fitted(self)
y_pred = self.final_estimator_.predict_proba(self.transform(X))
if isinstance(self._label_encoder, list):
# Handle the multilabel-indicator cases
y_pred = np.array([preds[:, 0] for preds in y_pred]).T
return y_pred
@available_if(_estimator_has("decision_function"))
def decision_function(self, X):
"""Decision function for samples in `X` using the final estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
decisions : ndarray of shape (n_samples,), (n_samples, n_classes), \
or (n_samples, n_classes * (n_classes-1) / 2)
            The decision function computed by the final estimator.
"""
check_is_fitted(self)
return self.final_estimator_.decision_function(self.transform(X))
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
y_preds : ndarray of shape (n_samples, n_estimators) or \
(n_samples, n_classes * n_estimators)
Prediction outputs for each estimator.
"""
return self._transform(X)
def _sk_visual_block_(self):
# If final_estimator's default changes then this should be
# updated.
if self.final_estimator is None:
final_estimator = LogisticRegression()
else:
final_estimator = self.final_estimator
return super()._sk_visual_block_(final_estimator)
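# Illustrative sketch (not part of scikit-learn): the Notes section above
# states that, for binary problems, the first `predict_proba` column of each
# base estimator is dropped because the two columns are perfectly collinear.
# Under the assumption of a plain synthetic binary dataset, `transform`
# therefore yields one column per base estimator rather than two.
def _demo_binary_column_drop():
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.tree import DecisionTreeClassifier

    X, y = make_classification(n_samples=100, random_state=0)
    clf = StackingClassifier(
        estimators=[
            ("lr", LogisticRegression()),
            ("dt", DecisionTreeClassifier(random_state=0)),
        ],
        stack_method="predict_proba",
    ).fit(X, y)
    # Two base estimators, binary target: one column each, not two.
    assert clf.transform(X).shape == (100, 2)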
class StackingRegressor(RegressorMixin, _BaseStacking):
"""Stack of estimators with a final regressor.
    Stacked generalization consists in stacking the outputs of individual
    estimators and using a regressor to compute the final prediction.
    Stacking exploits the strength of each individual estimator by using
    their outputs as the input of a final estimator.
Note that `estimators_` are fitted on the full `X` while `final_estimator_`
is trained using cross-validated predictions of the base estimators using
`cross_val_predict`.
Read more in the :ref:`User Guide <stacking>`.
.. versionadded:: 0.22
Parameters
----------
estimators : list of (str, estimator)
Base estimators which will be stacked together. Each element of the
list is defined as a tuple of string (i.e. name) and an estimator
instance. An estimator can be set to 'drop' using `set_params`.
final_estimator : estimator, default=None
A regressor which will be used to combine the base estimators.
The default regressor is a :class:`~sklearn.linear_model.RidgeCV`.
cv : int, cross-validation generator, iterable, or "prefit", default=None
Determines the cross-validation splitting strategy used in
`cross_val_predict` to train `final_estimator`. Possible inputs for
cv are:
* None, to use the default 5-fold cross validation,
* integer, to specify the number of folds in a (Stratified) KFold,
* An object to be used as a cross-validation generator,
* An iterable yielding train, test splits.
* "prefit" to assume the `estimators` are prefit, and skip cross validation
For integer/None inputs, if the estimator is a classifier and y is
either binary or multiclass,
:class:`~sklearn.model_selection.StratifiedKFold` is used.
In all other cases, :class:`~sklearn.model_selection.KFold` is used.
These splitters are instantiated with `shuffle=False` so the splits
will be the same across calls.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
If "prefit" is passed, it is assumed that all `estimators` have
been fitted already. The `final_estimator_` is trained on the `estimators`
predictions on the full training set and are **not** cross validated
predictions. Please note that if the models have been trained on the same
data to train the stacking model, there is a very high risk of overfitting.
.. versionadded:: 1.1
The 'prefit' option was added in 1.1
        .. note::
           A larger number of splits will provide no benefit if the number
           of training samples is large enough; it will only increase the
           training time. ``cv`` is not used for model evaluation but for
           prediction.
n_jobs : int, default=None
The number of jobs to run in parallel for `fit` of all `estimators`.
`None` means 1 unless in a `joblib.parallel_backend` context. -1 means
using all processors. See Glossary for more details.
passthrough : bool, default=False
When False, only the predictions of estimators will be used as
training data for `final_estimator`. When True, the
`final_estimator` is trained on the predictions as well as the
original training data.
verbose : int, default=0
Verbosity level.
Attributes
----------
estimators_ : list of estimator
The elements of the `estimators` parameter, having been fitted on the
training data. If an estimator has been set to `'drop'`, it
will not appear in `estimators_`. When `cv="prefit"`, `estimators_`
is set to `estimators` and is not fitted again.
named_estimators_ : :class:`~sklearn.utils.Bunch`
Attribute to access any fitted sub-estimators by name.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying regressor exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimators expose such an attribute when fit.
.. versionadded:: 1.0
final_estimator_ : estimator
        The regressor which predicts given the output of `estimators_`.
stack_method_ : list of str
The method used by each base estimator.
See Also
--------
StackingClassifier : Stack of estimators with a final classifier.
References
----------
.. [1] Wolpert, David H. "Stacked generalization." Neural networks 5.2
(1992): 241-259.
Examples
--------
>>> from sklearn.datasets import load_diabetes
>>> from sklearn.linear_model import RidgeCV
>>> from sklearn.svm import LinearSVR
>>> from sklearn.ensemble import RandomForestRegressor
>>> from sklearn.ensemble import StackingRegressor
>>> X, y = load_diabetes(return_X_y=True)
>>> estimators = [
... ('lr', RidgeCV()),
... ('svr', LinearSVR(random_state=42))
... ]
>>> reg = StackingRegressor(
... estimators=estimators,
... final_estimator=RandomForestRegressor(n_estimators=10,
... random_state=42)
... )
>>> from sklearn.model_selection import train_test_split
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, random_state=42
... )
>>> reg.fit(X_train, y_train).score(X_test, y_test)
0.3...
"""
def __init__(
self,
estimators,
final_estimator=None,
*,
cv=None,
n_jobs=None,
passthrough=False,
verbose=0,
):
super().__init__(
estimators=estimators,
final_estimator=final_estimator,
cv=cv,
stack_method="predict",
n_jobs=n_jobs,
passthrough=passthrough,
verbose=verbose,
)
def _validate_final_estimator(self):
self._clone_final_estimator(default=RidgeCV())
if not is_regressor(self.final_estimator_):
raise ValueError(
"'final_estimator' parameter should be a regressor. Got {}".format(
self.final_estimator_
)
)
def fit(self, X, y, sample_weight=None):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if all underlying estimators
support sample weights.
Returns
-------
self : object
Returns a fitted instance.
"""
y = column_or_1d(y, warn=True)
return super().fit(X, y, sample_weight)
def transform(self, X):
"""Return the predictions for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
y_preds : ndarray of shape (n_samples, n_estimators)
Prediction outputs for each estimator.
"""
return self._transform(X)
def _sk_visual_block_(self):
# If final_estimator's default changes then this should be
# updated.
if self.final_estimator is None:
final_estimator = RidgeCV()
else:
final_estimator = self.final_estimator
return super()._sk_visual_block_(final_estimator)
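# Illustrative sketch (not part of scikit-learn): `get_feature_names_out`
# prefixes each meta-feature with the lowercased class name and the estimator
# name, then appends the input feature names when `passthrough=True`. The
# diabetes dataset and the estimator names here are assumptions for the demo.
def _demo_feature_names_out():
    from sklearn.datasets import load_diabetes
    from sklearn.linear_model import RidgeCV
    from sklearn.svm import LinearSVR

    X, y = load_diabetes(return_X_y=True, as_frame=True)
    reg = StackingRegressor(
        estimators=[("lr", RidgeCV()), ("svr", LinearSVR(random_state=42))],
        passthrough=True,
    ).fit(X, y)
    names = reg.get_feature_names_out()
    # e.g. ['stackingregressor_lr', 'stackingregressor_svr', 'age', ...]
    assert names[0] == "stackingregressor_lr"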
|
{
"content_hash": "17326241f4296f9b15e1c573c15f1e5f",
"timestamp": "",
"source": "github",
"line_count": 946,
"max_line_length": 88,
"avg_line_length": 38.07188160676533,
"alnum_prop": 0.6003442914260329,
"repo_name": "anntzer/scikit-learn",
"id": "2b43dd5c0a2e3889581f21ffeb3440545d977149",
"size": "36016",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sklearn/ensemble/_stacking.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "667491"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10429261"
},
{
"name": "Shell",
"bytes": "43325"
}
],
"symlink_target": ""
}
|
class Solution:
# @param head, a ListNode
# @return a ListNode
def deleteDuplicates(self, head):
if head is None:
return None
if head.next is None:
return head
result = ListNode(0)
p = head
q = result
flag = True
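        # `flag` is True while the current value has not repeated; a run of
        # duplicates clears it until a new value appears, so only values that
        # never repeat are copied into the result list.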
while p.next is not None:
if p.val != p.next.val:
if flag:
q.next = ListNode(p.val)
q = q.next
p = p.next
else:
flag = True
p = p.next
else:
flag = False
p = p.next
if flag:
q.next = ListNode(p.val)
result = result.next
return result
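# Hedged test harness (not part of the original solution): LeetCode supplies
# ListNode at judge time, so the class above is not self-contained on its own.
# A minimal definition plus helpers and a quick check of the logic:
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

def _build(values):
    head = cur = ListNode(0)
    for v in values:
        cur.next = ListNode(v)
        cur = cur.next
    return head.next

def _to_list(node):
    out = []
    while node is not None:
        out.append(node.val)
        node = node.next
    return out

# 1->2->3->3->4->4->5 becomes 1->2->5: every value that repeats is removed.
assert _to_list(Solution().deleteDuplicates(_build([1, 2, 3, 3, 4, 4, 5]))) == [1, 2, 5]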
|
{
"content_hash": "53ff5a624da7c92922bf6416ee2b24bf",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 44,
"avg_line_length": 25.5,
"alnum_prop": 0.40784313725490196,
"repo_name": "richard912611428iop/leetcode-py",
"id": "013597e0c12fad2073faa861717316cd390d3c27",
"size": "901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Remove Duplicates from Sorted List II.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54872"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0007_auto_20160202_1001'),
]
operations = [
migrations.AlterField(
model_name='event',
name='prettyUrl',
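            # French strings: help_text means "Human-readable URL to overlay
            # with the event image."; verbose_name means "clean url".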
field=models.CharField(blank=True, help_text="URL lisible par les humains pour superposer avec l'image de l'événement.", max_length=150, null=True, verbose_name='url propre'),
),
]
|
{
"content_hash": "c05ed41e926911919f9f768a7f610c3c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 187,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.6375968992248062,
"repo_name": "DesjardinsLab/event-kiosk",
"id": "c0cedb270b553cea09934aee7ca2e8ee9aac41d9",
"size": "590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/event_kiosk/event_kiosk/events/migrations/0008_auto_20160202_1102.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14630"
},
{
"name": "HTML",
"bytes": "1790"
},
{
"name": "JavaScript",
"bytes": "44007"
},
{
"name": "Python",
"bytes": "50632"
},
{
"name": "Shell",
"bytes": "1035"
}
],
"symlink_target": ""
}
|
import logging
import os
import shutil
import tempfile
import time
import traceback
from collections import OrderedDict
# We don't have an explicit pathlib dependency because this code only works with
# the interactive target installed which has an indirect dependency on pathlib
# through ipython>=5.9.0.
from pathlib import Path
from google.protobuf.message import DecodeError
import apache_beam as beam
from apache_beam import coders
from apache_beam.portability.api.beam_interactive_api_pb2 import TestStreamFileHeader
from apache_beam.portability.api.beam_interactive_api_pb2 import TestStreamFileRecord
from apache_beam.portability.api.beam_runner_api_pb2 import TestStreamPayload
from apache_beam.runners.interactive.cache_manager import CacheManager
from apache_beam.runners.interactive.cache_manager import SafeFastPrimitivesCoder
from apache_beam.runners.interactive.caching.cacheable import CacheKey
from apache_beam.testing.test_stream import OutputFormat
from apache_beam.testing.test_stream import ReverseTestStream
from apache_beam.utils import timestamp
_LOGGER = logging.getLogger(__name__)
class StreamingCacheSink(beam.PTransform):
"""A PTransform that writes TestStreamFile(Header|Records)s to file.
This transform takes in an arbitrary element stream and writes the list of
TestStream events (as TestStreamFileRecords) to file. When replayed, this
will produce the best-effort replay of the original job (e.g. some elements
may be produced slightly out of order from the original stream).
  Note that this PTransform is assumed to run only on a single machine where
  the following assumptions hold: elements arrive in order, and no two
  transforms write to the same file. This PTransform is assumed to only
  run correctly with the DirectRunner.
TODO(BEAM-9447): Generalize this to more source/sink types aside from file
based. Also, generalize to cases where there might be multiple workers
writing to the same sink.
"""
def __init__(
self,
cache_dir,
filename,
sample_resolution_sec,
coder=SafeFastPrimitivesCoder()):
self._cache_dir = cache_dir
self._filename = filename
self._sample_resolution_sec = sample_resolution_sec
self._coder = coder
self._path = os.path.join(self._cache_dir, self._filename)
@property
def path(self):
"""Returns the path the sink leads to."""
return self._path
@property
def size_in_bytes(self):
"""Returns the space usage in bytes of the sink."""
try:
return os.stat(self._path).st_size
except OSError:
_LOGGER.debug(
'Failed to calculate cache size for file %s, the file might have not '
'been created yet. Return 0. %s',
self._path,
traceback.format_exc())
return 0
def expand(self, pcoll):
class StreamingWriteToText(beam.DoFn):
"""DoFn that performs the writing.
Note that the other file writing methods cannot be used in streaming
contexts.
"""
def __init__(self, full_path, coder=SafeFastPrimitivesCoder()):
self._full_path = full_path
self._coder = coder
# Try and make the given path.
Path(os.path.dirname(full_path)).mkdir(parents=True, exist_ok=True)
def start_bundle(self):
# Open the file for 'append-mode' and writing 'bytes'.
self._fh = open(self._full_path, 'ab')
def finish_bundle(self):
self._fh.close()
def process(self, e):
"""Appends the given element to the file.
"""
self._fh.write(self._coder.encode(e) + b'\n')
return (
pcoll
| ReverseTestStream(
output_tag=self._filename,
sample_resolution_sec=self._sample_resolution_sec,
output_format=OutputFormat.SERIALIZED_TEST_STREAM_FILE_RECORDS,
coder=self._coder)
| beam.ParDo(
StreamingWriteToText(full_path=self._path, coder=self._coder)))
class StreamingCacheSource:
"""A class that reads and parses TestStreamFile(Header|Reader)s.
This source operates in the following way:
1. Wait for up to `timeout_secs` for the file to be available.
2. Read, parse, and emit the entire contents of the file
3. Wait for more events to come or until `is_cache_complete` returns True
4. If there are more events, then go to 2
5. Otherwise, stop emitting.
  This class is used to read from file and send its events to the TestStream
  via the StreamingCache.Reader.
"""
def __init__(self, cache_dir, labels, is_cache_complete=None, coder=None):
if not coder:
coder = SafeFastPrimitivesCoder()
if not is_cache_complete:
is_cache_complete = lambda _: True
self._cache_dir = cache_dir
self._coder = coder
self._labels = labels
self._path = os.path.join(self._cache_dir, *self._labels)
self._is_cache_complete = is_cache_complete
self._pipeline_id = CacheKey.from_str(labels[-1]).pipeline_id
def _wait_until_file_exists(self, timeout_secs=30):
"""Blocks until the file exists for a maximum of timeout_secs.
"""
# Wait for up to `timeout_secs` for the file to be available.
start = time.time()
while not os.path.exists(self._path):
time.sleep(1)
if time.time() - start > timeout_secs:
pcollection_var = CacheKey.from_str(self._labels[-1]).var
raise RuntimeError(
'Timed out waiting for cache file for PCollection `{}` to be '
'available with path {}.'.format(pcollection_var, self._path))
return open(self._path, mode='rb')
def _emit_from_file(self, fh, tail):
"""Emits the TestStreamFile(Header|Record)s from file.
This returns a generator to be able to read all lines from the given file.
If `tail` is True, then it will wait until the cache is complete to exit.
Otherwise, it will read the file only once.
"""
# Always read at least once to read the whole file.
while True:
pos = fh.tell()
line = fh.readline()
# Check if we are at EOF or if we have an incomplete line.
if not line or (line and line[-1] != b'\n'[0]):
# Read at least the first line to get the header.
if not tail and pos != 0:
break
# Complete reading only when the cache is complete.
if self._is_cache_complete(self._pipeline_id):
break
# Otherwise wait for new data in the file to be written.
time.sleep(0.5)
fh.seek(pos)
else:
# The first line at pos = 0 is always the header. Read the line without
# the new line.
to_decode = line[:-1]
proto_cls = TestStreamFileHeader if pos == 0 else TestStreamFileRecord
msg = self._try_parse_as(proto_cls, to_decode)
if msg:
yield msg
else:
break
def _try_parse_as(self, proto_cls, to_decode):
try:
msg = proto_cls()
msg.ParseFromString(self._coder.decode(to_decode))
except DecodeError:
_LOGGER.error(
'Could not parse as %s. This can indicate that the cache is '
          'corrupted. Please restart the kernel. '
'\nfile: %s \nmessage: %s',
proto_cls,
self._path,
to_decode)
msg = None
return msg
def read(self, tail):
"""Reads all TestStreamFile(Header|TestStreamFileRecord)s from file.
This returns a generator to be able to read all lines from the given file.
If `tail` is True, then it will wait until the cache is complete to exit.
Otherwise, it will read the file only once.
"""
with self._wait_until_file_exists() as f:
for e in self._emit_from_file(f, tail):
yield e
class StreamingCache(CacheManager):
"""Abstraction that holds the logic for reading and writing to cache.
"""
def __init__(
self,
cache_dir,
is_cache_complete=None,
sample_resolution_sec=0.1,
saved_pcoders=None):
self._sample_resolution_sec = sample_resolution_sec
self._is_cache_complete = is_cache_complete
if cache_dir:
self._cache_dir = cache_dir
else:
self._cache_dir = tempfile.mkdtemp(
prefix='ib-', dir=os.environ.get('TEST_TMPDIR', None))
# List of saved pcoders keyed by PCollection path. It is OK to keep this
# list in memory because once FileBasedCacheManager object is
# destroyed/re-created it loses the access to previously written cache
# objects anyways even if cache_dir already exists. In other words,
# it is not possible to resume execution of Beam pipeline from the
# saved cache if FileBasedCacheManager has been reset.
#
# However, if we are to implement better cache persistence, one needs
# to take care of keeping consistency between the cached PCollection
# and its PCoder type.
self._saved_pcoders = saved_pcoders or {}
self._default_pcoder = SafeFastPrimitivesCoder()
# The sinks to capture data from capturable sources.
# Dict([str, StreamingCacheSink])
self._capture_sinks = {}
self._capture_keys = set()
def size(self, *labels):
if self.exists(*labels):
return os.path.getsize(os.path.join(self._cache_dir, *labels))
return 0
@property
def capture_size(self):
return sum([sink.size_in_bytes for _, sink in self._capture_sinks.items()])
@property
def capture_paths(self):
return list(self._capture_sinks.keys())
@property
def capture_keys(self):
return self._capture_keys
def exists(self, *labels):
path = os.path.join(self._cache_dir, *labels)
return os.path.exists(path)
# TODO(srohde): Modify this to return the correct version.
def read(self, *labels, **args):
"""Returns a generator to read all records from file."""
tail = args.pop('tail', False)
# Only immediately return when the file doesn't exist when the user wants a
# snapshot of the cache (when tail is false).
if not self.exists(*labels) and not tail:
return iter([]), -1
reader = StreamingCacheSource(
self._cache_dir,
labels,
self._is_cache_complete,
self.load_pcoder(*labels)).read(tail=tail)
# Return an empty iterator if there is nothing in the file yet. This can
# only happen when tail is False.
try:
header = next(reader)
except StopIteration:
return iter([]), -1
return StreamingCache.Reader([header], [reader]).read(), 1
def read_multiple(self, labels, tail=True):
"""Returns a generator to read all records from file.
Does tail until the cache is complete. This is because it is used in the
TestStreamServiceController to read from file which is only used during
pipeline runtime which needs to block.
"""
readers = [
StreamingCacheSource(
self._cache_dir, l, self._is_cache_complete,
self.load_pcoder(*l)).read(tail=tail) for l in labels
]
headers = [next(r) for r in readers]
return StreamingCache.Reader(headers, readers).read()
def write(self, values, *labels):
"""Writes the given values to cache.
"""
directory = os.path.join(self._cache_dir, *labels[:-1])
filepath = os.path.join(directory, labels[-1])
if not os.path.exists(directory):
os.makedirs(directory)
with open(filepath, 'ab') as f:
for v in values:
if isinstance(v, (TestStreamFileHeader, TestStreamFileRecord)):
val = v.SerializeToString()
else:
raise TypeError(
'Values given to streaming cache should be either '
'TestStreamFileHeader or TestStreamFileRecord.')
f.write(self.load_pcoder(*labels).encode(val) + b'\n')
def clear(self, *labels):
directory = os.path.join(self._cache_dir, *labels[:-1])
filepath = os.path.join(directory, labels[-1])
self._capture_keys.discard(labels[-1])
if os.path.exists(filepath):
os.remove(filepath)
return True
return False
def source(self, *labels):
"""Returns the StreamingCacheManager source.
This is beam.Impulse() because unbounded sources will be marked with this
and then the PipelineInstrument will replace these with a TestStream.
"""
return beam.Impulse()
def sink(self, labels, is_capture=False):
"""Returns a StreamingCacheSink to write elements to file.
Note that this is assumed to only work in the DirectRunner as the underlying
StreamingCacheSink assumes a single machine to have correct element
ordering.
"""
filename = labels[-1]
cache_dir = os.path.join(self._cache_dir, *labels[:-1])
sink = StreamingCacheSink(
cache_dir,
filename,
self._sample_resolution_sec,
self.load_pcoder(*labels))
if is_capture:
self._capture_sinks[sink.path] = sink
self._capture_keys.add(filename)
return sink
def save_pcoder(self, pcoder, *labels):
self._saved_pcoders[os.path.join(self._cache_dir, *labels)] = pcoder
def load_pcoder(self, *labels):
saved_pcoder = self._saved_pcoders.get(
os.path.join(self._cache_dir, *labels), None)
if saved_pcoder is None or isinstance(saved_pcoder,
coders.FastPrimitivesCoder):
return self._default_pcoder
return saved_pcoder
def cleanup(self):
if os.path.exists(self._cache_dir):
def on_fail_to_cleanup(function, path, excinfo):
_LOGGER.warning(
            'Failed to clean up temporary files: %s. You may '
'manually delete them if necessary. Error was: %s',
path,
excinfo)
shutil.rmtree(self._cache_dir, onerror=on_fail_to_cleanup)
self._saved_pcoders = {}
self._capture_sinks = {}
self._capture_keys = set()
class Reader(object):
"""Abstraction that reads from PCollection readers.
This class is an Abstraction layer over multiple PCollection readers to be
used for supplying a TestStream service with events.
This class is also responsible for holding the state of the clock, injecting
clock advancement events, and watermark advancement events.
"""
def __init__(self, headers, readers):
# This timestamp is used as the monotonic clock to order events in the
# replay.
self._monotonic_clock = timestamp.Timestamp.of(0)
# The PCollection cache readers.
self._readers = {}
# The file headers that are metadata for that particular PCollection.
# The header allows for metadata about an entire stream, so that the data
# isn't copied per record.
self._headers = {header.tag: header for header in headers}
self._readers = OrderedDict(
((h.tag, r) for (h, r) in zip(headers, readers)))
# The most recently read timestamp per tag.
self._stream_times = {
tag: timestamp.Timestamp(seconds=0)
for tag in self._headers
}
def _test_stream_events_before_target(self, target_timestamp):
"""Reads the next iteration of elements from each stream.
Retrieves an element from each stream iff the most recently read timestamp
from that stream is less than the target_timestamp. Since the amount of
events may not fit into memory, this StreamingCache reads at most one
element from each stream at a time.
"""
records = []
for tag, r in self._readers.items():
# The target_timestamp is the maximum timestamp that was read from the
# stream. Some readers may have elements that are less than this. Thus,
# we skip all readers that already have elements that are at this
# timestamp so that we don't read everything into memory.
if self._stream_times[tag] >= target_timestamp:
continue
try:
record = next(r).recorded_event
if record.HasField('processing_time_event'):
self._stream_times[tag] += timestamp.Duration(
micros=record.processing_time_event.advance_duration)
records.append((tag, record, self._stream_times[tag]))
except StopIteration:
pass
return records
def _merge_sort(self, previous_events, new_events):
return sorted(
previous_events + new_events, key=lambda x: x[2], reverse=True)
def _min_timestamp_of(self, events):
return events[-1][2] if events else timestamp.MAX_TIMESTAMP
def _event_stream_caught_up_to_target(self, events, target_timestamp):
empty_events = not events
stream_is_past_target = self._min_timestamp_of(events) > target_timestamp
return empty_events or stream_is_past_target
def read(self):
"""Reads records from PCollection readers.
"""
# The largest timestamp read from the different streams.
target_timestamp = timestamp.MAX_TIMESTAMP
# The events from last iteration that are past the target timestamp.
unsent_events = []
# Emit events until all events have been read.
while True:
# Read the next set of events. The read events will most likely be
# out of order if there are multiple readers. Here we sort them into
# a more manageable state.
new_events = self._test_stream_events_before_target(target_timestamp)
events_to_send = self._merge_sort(unsent_events, new_events)
if not events_to_send:
break
# Get the next largest timestamp in the stream. This is used as the
# timestamp for readers to "catch-up" to. This will only read from
# readers with a timestamp less than this.
target_timestamp = self._min_timestamp_of(events_to_send)
# Loop through the elements with the correct timestamp.
while not self._event_stream_caught_up_to_target(events_to_send,
target_timestamp):
# First advance the clock to match the time of the stream. This has
# a side-effect of also advancing this cache's clock.
tag, r, curr_timestamp = events_to_send.pop()
if curr_timestamp > self._monotonic_clock:
yield self._advance_processing_time(curr_timestamp)
# Then, send either a new element or watermark.
if r.HasField('element_event'):
r.element_event.tag = tag
yield r
elif r.HasField('watermark_event'):
r.watermark_event.tag = tag
yield r
unsent_events = events_to_send
target_timestamp = self._min_timestamp_of(unsent_events)
def _advance_processing_time(self, new_timestamp):
"""Advances the internal clock and returns an AdvanceProcessingTime event.
"""
      advance_by = new_timestamp.micros - self._monotonic_clock.micros
      e = TestStreamPayload.Event(
          processing_time_event=TestStreamPayload.Event.AdvanceProcessingTime(
              advance_duration=advance_by))
self._monotonic_clock = new_timestamp
return e
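# Hedged standalone sketch (not part of Apache Beam): the tail-reading loop
# documented in StreamingCacheSource._emit_from_file, reduced to plain text
# lines. `is_complete` stands in for the `_is_cache_complete` callback, and
# the header/tail bookkeeping of the original is omitted.
def tail_lines(path, is_complete, poll_secs=0.5):
  """Yields complete lines from `path`, waiting for more data until
  `is_complete()` returns True (mirroring the tail=True behavior above)."""
  with open(path, 'rb') as fh:
    while True:
      pos = fh.tell()
      line = fh.readline()
      # At EOF, or holding a partial line that has not been flushed yet.
      if not line or not line.endswith(b'\n'):
        if is_complete():
          break
        time.sleep(poll_secs)
        fh.seek(pos)  # rewind and re-read once the full line is written
      else:
        yield line[:-1]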
|
{
"content_hash": "7a7ee95636e3659a7d11a74e1457d88c",
"timestamp": "",
"source": "github",
"line_count": 516,
"max_line_length": 85,
"avg_line_length": 36.77713178294574,
"alnum_prop": 0.6609580017916425,
"repo_name": "axbaretto/beam",
"id": "fc8a8aa4476895cee3feb755742e2e38cf24aedb",
"size": "19783",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/interactive/caching/streaming_cache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "1339873"
},
{
"name": "C++",
"bytes": "1132901"
},
{
"name": "CSS",
"bytes": "124283"
},
{
"name": "Dockerfile",
"bytes": "23950"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2795906"
},
{
"name": "Groovy",
"bytes": "187109"
},
{
"name": "HTML",
"bytes": "238575"
},
{
"name": "Java",
"bytes": "39085315"
},
{
"name": "JavaScript",
"bytes": "1221326"
},
{
"name": "Jupyter Notebook",
"bytes": "7396"
},
{
"name": "Makefile",
"bytes": "354938"
},
{
"name": "Python",
"bytes": "51449019"
},
{
"name": "Roff",
"bytes": "70716"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "351541"
},
{
"name": "TeX",
"bytes": "70920"
},
{
"name": "Thrift",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
"""Commands for updating backend services.
There are separate alpha, beta, and GA command classes in this file.
"""
import copy
from googlecloudsdk.api_lib.compute import backend_services_utils
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.backend_services import flags
from googlecloudsdk.core import log
@base.ReleaseTracks(base.ReleaseTrack.GA)
class UpdateGA(base_classes.ReadWriteCommand):
"""Update a backend service."""
@staticmethod
def Args(parser):
flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.AddArgument(parser)
flags.AddDescription(parser)
flags.AddHealthChecks(parser)
flags.AddHttpHealthChecks(parser)
flags.AddHttpsHealthChecks(parser)
flags.AddTimeout(parser, default=None)
flags.AddPortName(parser)
flags.AddProtocol(parser, default=None)
flags.AddEnableCdn(parser, default=None)
flags.AddSessionAffinity(parser, internal_lb=True)
flags.AddAffinityCookieTtl(parser)
flags.AddConnectionDrainingTimeout(parser)
@property
def service(self):
if self.regional:
return self.compute.regionBackendServices
return self.compute.backendServices
@property
def resource_type(self):
if self.regional:
return 'regionBackendServices'
return 'backendServices'
def CreateReference(self, args):
if self.regional:
return flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.ResolveAsResource(
args, self.resources,
default_scope=compute_scope.ScopeEnum.GLOBAL)
return flags.GLOBAL_BACKEND_SERVICE_ARG.ResolveAsResource(
args, self.resources)
def GetGetRequest(self, args):
if self.regional:
return (
self.service,
'Get',
self.messages.ComputeRegionBackendServicesGetRequest(
project=self.project,
region=self.ref.region,
backendService=self.ref.Name()))
return (
self.service,
'Get',
self.messages.ComputeBackendServicesGetRequest(
project=self.project,
backendService=self.ref.Name()))
def GetSetRequest(self, args, replacement, _):
if self.regional:
return (
self.service,
'Update',
self.messages.ComputeRegionBackendServicesUpdateRequest(
project=self.project,
region=self.ref.region,
backendService=self.ref.Name(),
backendServiceResource=replacement))
return (
self.service,
'Update',
self.messages.ComputeBackendServicesUpdateRequest(
project=self.project,
backendService=self.ref.Name(),
backendServiceResource=replacement))
def Modify(self, args, existing):
replacement = copy.deepcopy(existing)
if args.connection_draining_timeout is not None:
replacement.connectionDraining = self.messages.ConnectionDraining(
drainingTimeoutSec=args.connection_draining_timeout)
if args.description:
replacement.description = args.description
elif args.description is not None:
replacement.description = None
health_checks = backend_services_utils.GetHealthChecks(args, self)
if health_checks:
replacement.healthChecks = health_checks
if args.timeout:
replacement.timeoutSec = args.timeout
if args.port:
replacement.port = args.port
if args.port_name:
replacement.portName = args.port_name
if args.protocol:
replacement.protocol = (self.messages.BackendService
.ProtocolValueValuesEnum(args.protocol))
if args.enable_cdn is not None:
replacement.enableCDN = args.enable_cdn
if args.session_affinity is not None:
replacement.sessionAffinity = (
self.messages.BackendService.SessionAffinityValueValuesEnum(
args.session_affinity))
if args.affinity_cookie_ttl is not None:
replacement.affinityCookieTtlSec = args.affinity_cookie_ttl
return replacement
def ValidateArgs(self, args):
if not any([
args.affinity_cookie_ttl is not None,
args.connection_draining_timeout is not None,
args.description is not None,
args.enable_cdn is not None,
args.health_checks,
args.http_health_checks,
args.https_health_checks,
args.port,
args.port_name,
args.protocol,
args.session_affinity is not None,
args.timeout is not None,
]):
raise exceptions.ToolException('At least one property must be modified.')
def SetRegional(self, args):
# Check whether --region flag was used for regional resource.
self.regional = getattr(args, 'region', None) is not None
def Run(self, args):
self.ValidateArgs(args)
self.regional = backend_services_utils.IsRegionalRequest(args)
return super(UpdateGA, self).Run(args)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class UpdateAlpha(UpdateGA):
"""Update a backend service."""
@staticmethod
def Args(parser):
flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.AddArgument(parser)
flags.AddDescription(parser)
flags.AddHealthChecks(parser)
flags.AddHttpHealthChecks(parser)
flags.AddHttpsHealthChecks(parser)
flags.AddTimeout(parser, default=None)
flags.AddPortName(parser)
flags.AddProtocol(parser, default=None)
flags.AddConnectionDrainingTimeout(parser)
flags.AddEnableCdn(parser, default=None)
flags.AddCacheKeyIncludeProtocol(parser, default=None)
flags.AddCacheKeyIncludeHost(parser, default=None)
flags.AddCacheKeyIncludeQueryString(parser, default=None)
flags.AddCacheKeyQueryStringList(parser)
flags.AddSessionAffinity(parser, internal_lb=True)
flags.AddAffinityCookieTtl(parser)
flags.AddIap(parser)
def Modify(self, args, existing):
replacement = super(UpdateAlpha, self).Modify(args, existing)
if args.connection_draining_timeout is not None:
replacement.connectionDraining = self.messages.ConnectionDraining(
drainingTimeoutSec=args.connection_draining_timeout)
if args.iap:
replacement.iap = backend_services_utils.GetIAP(
args, self.messages,
existing_iap_settings=getattr(existing, 'iap', None))
if (replacement.iap.enabled and replacement.protocol is not
self.messages.BackendService.ProtocolValueValuesEnum.HTTPS):
log.warning('IAP has been enabled for a backend service that does '
'not use HTTPS. Data sent from the Load Balancer to your '
'VM will not be encrypted.')
cache_key_policy = self.messages.CacheKeyPolicy()
if (replacement.cdnPolicy is not None and
replacement.cdnPolicy.cacheKeyPolicy is not None):
cache_key_policy = replacement.cdnPolicy.cacheKeyPolicy
backend_services_utils.ValidateCacheKeyPolicyArgs(args)
backend_services_utils.UpdateCacheKeyPolicy(args, cache_key_policy)
if (args.cache_key_include_protocol is not None or
args.cache_key_include_host is not None or
args.cache_key_include_query_string is not None or
args.cache_key_query_string_whitelist is not None or
args.cache_key_query_string_blacklist is not None):
replacement.cdnPolicy = self.messages.BackendServiceCdnPolicy(
cacheKeyPolicy=cache_key_policy)
return replacement
def ValidateArgs(self, args):
if not any([
args.affinity_cookie_ttl is not None,
args.connection_draining_timeout is not None,
args.description is not None,
args.enable_cdn is not None,
args.cache_key_include_protocol is not None,
args.cache_key_include_host is not None,
args.cache_key_include_query_string is not None,
args.cache_key_query_string_whitelist is not None,
args.cache_key_query_string_blacklist is not None,
args.http_health_checks,
args.port,
args.port_name,
args.protocol,
args.session_affinity is not None,
args.timeout is not None,
getattr(args, 'health_checks', None),
getattr(args, 'https_health_checks', None),
getattr(args, 'iap', None),
]):
raise exceptions.ToolException('At least one property must be modified.')
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class UpdateBeta(UpdateGA):
"""Update a backend service."""
@staticmethod
def Args(parser):
flags.GLOBAL_REGIONAL_BACKEND_SERVICE_ARG.AddArgument(parser)
flags.AddDescription(parser)
flags.AddHealthChecks(parser)
flags.AddHttpHealthChecks(parser)
flags.AddHttpsHealthChecks(parser)
flags.AddTimeout(parser, default=None)
flags.AddPortName(parser)
flags.AddProtocol(parser, default=None)
flags.AddConnectionDrainingTimeout(parser)
flags.AddEnableCdn(parser, default=None)
flags.AddSessionAffinity(parser, internal_lb=True)
flags.AddAffinityCookieTtl(parser)
def Modify(self, args, existing):
replacement = super(UpdateBeta, self).Modify(args, existing)
if args.connection_draining_timeout is not None:
replacement.connectionDraining = self.messages.ConnectionDraining(
drainingTimeoutSec=args.connection_draining_timeout)
return replacement
def ValidateArgs(self, args):
if not any([
args.affinity_cookie_ttl is not None,
args.connection_draining_timeout is not None,
args.description is not None,
args.enable_cdn is not None,
args.health_checks,
args.http_health_checks,
args.https_health_checks,
args.port,
args.port_name,
args.protocol,
args.session_affinity is not None,
args.timeout is not None,
]):
raise exceptions.ToolException('At least one property must be modified.')
UpdateGA.detailed_help = {
'brief': 'Update a backend service',
'DESCRIPTION': """
*{command}* is used to update backend services.
""",
}
UpdateAlpha.detailed_help = UpdateGA.detailed_help
UpdateBeta.detailed_help = UpdateGA.detailed_help
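# Hedged standalone sketch (not part of the Cloud SDK): the Modify methods
# above follow a read-modify-write pattern -- deep-copy the fetched resource,
# then touch only the fields whose flags were explicitly set, so unspecified
# fields keep their server-side values. A plain dict stands in for the
# protobuf message here, and the flag names are assumptions.
def _demo_modify(existing, flags_dict):
  replacement = copy.deepcopy(existing)
  if flags_dict.get('description') is not None:
    # An empty string clears the field, mirroring UpdateGA.Modify above.
    replacement['description'] = flags_dict['description'] or None
  if flags_dict.get('timeout') is not None:
    replacement['timeoutSec'] = flags_dict['timeout']
  return replacement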
|
{
"content_hash": "16b7f05b093196e6ebd83e24e2ca6516",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 79,
"avg_line_length": 34.22742474916388,
"alnum_prop": 0.6996286886847762,
"repo_name": "KaranToor/MA450",
"id": "79c9dd905d8d1d229eb47ac58ef6bfc73300a0dd",
"size": "10829",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/.install/.backup/lib/surface/compute/backend_services/update.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="family", parent_name="pie.title.font", **kwargs):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "plot"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs,
)
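# Hedged usage sketch (not part of the generated plotly validators): the base
# validator classes expose `validate_coerce`; with `no_blank=True` an empty
# string is rejected, and `array_ok=True` lets a sequence of family names
# through as well. The sample values below are assumptions.
if __name__ == "__main__":
    v = FamilyValidator()
    print(v.validate_coerce("Courier New"))         # a single family string
    print(v.validate_coerce(["Arial", "Courier"]))  # a coerced sequence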
|
{
"content_hash": "c8f405d20d417b530157e35836cd6b04",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 85,
"avg_line_length": 39.285714285714285,
"alnum_prop": 0.5963636363636363,
"repo_name": "plotly/plotly.py",
"id": "04cb80f5b4192fe12f6733afce317dc62a76cc9d",
"size": "550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/pie/title/font/_family.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
import sys
import os
import hashlib
import py_compile
def flag_checker(candidate):
random = b"aes-128-ecb"
key = hashlib.sha512(random).digest()
correct = bytearray([242, 122, 104, 129, 95, 139, 6, 78, 89, 144, 165, 79, 212, 62, 71, 57, 211, 116, 128])
candidate = bytearray(bytes(candidate, "utf-8"))
for i in range(min(len(candidate), len(key))):
candidate[i] ^= key[i]
if correct == candidate:
print("Yes, that's the flag")
else:
print("Not sure that's that. Not a flag though.")
print("vvvv Here's some binary data. Figure it out.")
print("")
print("")
sys.stdout.flush()
py_compile.compile(__file__, cfile="/tmp/out.pyc")
with open("/tmp/out.pyc", "rb") as f:
sys.stdout.buffer.write(f.read())
sys.stdout.buffer.flush()
print("")
print("")
print("^^^^ Here's some binary data. Figure it out.")
sys.stdout.flush()
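# Hedged solver sketch (not part of the original task): the XOR key is derived
# deterministically from the hard-coded bytes b"aes-128-ecb", so the flag is
# simply `correct` XOR-ed with the same SHA-512 digest. Decoding as UTF-8
# assumes a printable flag.
def recover_flag():
    key = hashlib.sha512(b"aes-128-ecb").digest()
    correct = bytearray([242, 122, 104, 129, 95, 139, 6, 78, 89, 144,
                         165, 79, 212, 62, 71, 57, 211, 116, 128])
    return bytes(c ^ k for c, k in zip(correct, key)).decode("utf-8")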
|
{
"content_hash": "d6c7ac878e7eeac7bb0ecd547b4d7854",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 109,
"avg_line_length": 25.147058823529413,
"alnum_prop": 0.6526315789473685,
"repo_name": "google/google-ctf",
"id": "045f51f1ee838bf002bc452db4d61d8afe5e09cc",
"size": "874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2020/hackceler8/match-pre-package/task3/task3.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AIDL",
"bytes": "508"
},
{
"name": "Assembly",
"bytes": "107617"
},
{
"name": "BASIC",
"bytes": "6068"
},
{
"name": "Batchfile",
"bytes": "1032"
},
{
"name": "Blade",
"bytes": "14530"
},
{
"name": "C",
"bytes": "1481904"
},
{
"name": "C++",
"bytes": "2139472"
},
{
"name": "CMake",
"bytes": "11595"
},
{
"name": "CSS",
"bytes": "172375"
},
{
"name": "Dart",
"bytes": "6282"
},
{
"name": "Dockerfile",
"bytes": "232352"
},
{
"name": "EJS",
"bytes": "92308"
},
{
"name": "Emacs Lisp",
"bytes": "2668"
},
{
"name": "GDB",
"bytes": "273"
},
{
"name": "GLSL",
"bytes": "33392"
},
{
"name": "Go",
"bytes": "3031142"
},
{
"name": "HTML",
"bytes": "467647"
},
{
"name": "Java",
"bytes": "174199"
},
{
"name": "JavaScript",
"bytes": "2643200"
},
{
"name": "Lua",
"bytes": "5944"
},
{
"name": "Makefile",
"bytes": "149152"
},
{
"name": "NSIS",
"bytes": "2800"
},
{
"name": "Nix",
"bytes": "139"
},
{
"name": "PHP",
"bytes": "311900"
},
{
"name": "Perl",
"bytes": "32742"
},
{
"name": "Pug",
"bytes": "8752"
},
{
"name": "Python",
"bytes": "1756592"
},
{
"name": "Red",
"bytes": "188"
},
{
"name": "Rust",
"bytes": "541267"
},
{
"name": "Sage",
"bytes": "39814"
},
{
"name": "Shell",
"bytes": "382149"
},
{
"name": "Smali",
"bytes": "2316656"
},
{
"name": "Starlark",
"bytes": "8216"
},
{
"name": "SystemVerilog",
"bytes": "16466"
},
{
"name": "VCL",
"bytes": "895"
},
{
"name": "Verilog",
"bytes": "7230"
},
{
"name": "Vim Script",
"bytes": "890"
},
{
"name": "Vue",
"bytes": "10248"
}
],
"symlink_target": ""
}
|
import math, string
import numpy as np
from prettytable import PrettyTable
import unsupervised.hungarian
# --------------------------------------------------------------
# Ranking Similarity
# --------------------------------------------------------------
class JaccardBinary:
"""
Simple binary Jaccard-based ranking comparison, which does not take into account rank positions.
"""
def similarity( self, gold_ranking, test_ranking ):
sx = set(gold_ranking)
sy = set(test_ranking)
numer = len( sx.intersection(sy) )
if numer == 0:
return 0.0
denom = len( sx.union(sy) )
if denom == 0:
return 0.0
return float(numer)/denom
def __str__( self ):
return "%s" % ( self.__class__.__name__ )
class AverageJaccard(JaccardBinary):
"""
A top-weighted version of Jaccard, which takes into account rank positions.
This is based on Fagin's Average Overlap Intersection Metric.
"""
def similarity( self, gold_ranking, test_ranking ):
k = min( len(gold_ranking), len(test_ranking) )
total = 0.0
for i in range(1,k+1):
total += JaccardBinary.similarity( self, gold_ranking[0:i], test_ranking[0:i] )
return total/k
# --------------------------------------------------------------
# Ranking Set Agreement
# --------------------------------------------------------------
class RankingSetAgreement:
"""
Calculates the agreement between pairs of ranking sets, using a specified measure of
similarity between rankings.
"""
def __init__( self, metric = AverageJaccard() ):
self.metric = metric
def similarity( self, rankings1, rankings2 ):
"""
Calculate the overall agreement between two different ranking sets. This is given by the
mean similarity values for all matched pairs.
"""
self.results = None
self.S = self.build_matrix( rankings1, rankings2 )
score, self.results = self.hungarian_matching()
return score
def build_matrix( self, rankings1, rankings2 ):
"""
Construct the similarity matrix between the pairs of rankings in two
different ranking sets.
"""
rows = len(rankings1)
cols = len(rankings2)
S = np.zeros( (rows,cols) )
for row in range(rows):
for col in range(cols):
S[row,col] = self.metric.similarity( rankings1[row], rankings2[col] )
return S
def hungarian_matching( self ):
"""
Solve the Hungarian matching problem to find the best matches between columns and rows based on
values in the specified similarity matrix.
"""
# apply hungarian matching
h = unsupervised.hungarian.Hungarian()
C = h.make_cost_matrix(self.S)
h.calculate(C)
results = h.get_results()
# compute score based on similarities
score = 0.0
for (row,col) in results:
score += self.S[row,col]
score /= len(results)
return (score, results)
# --------------------------------------------------------------
# Utilities
# --------------------------------------------------------------
def calc_relevance_scores( n, rel_measure ):
"""
Utility function to compute a sequence of relevance scores using the specified function.
"""
scores = []
for i in range(n):
scores.append( rel_measure.relevance( i + 1 ) )
return scores
def term_rankings_size( term_rankings ):
"""
Return the number of terms covered by a list of multiple term rankings.
"""
m = 0
for ranking in term_rankings:
if m == 0:
m = len(ranking)
else:
m = min( len(ranking), m )
return m
def truncate_term_rankings( orig_rankings, top ):
"""
Truncate a list of multiple term rankings to the specified length.
"""
if top < 1:
return orig_rankings
trunc_rankings = []
for ranking in orig_rankings:
trunc_rankings.append( ranking[0:min(len(ranking),top)] )
return trunc_rankings
def format_term_rankings( term_rankings, labels = None, top = 10 ):
"""
Format a list of multiple term rankings using PrettyTable.
"""
from prettytable import PrettyTable
# add header
header = ["Rank"]
if labels is None:
for i in range( len(term_rankings) ):
header.append("C%02d" % (i+1) )
else:
for label in labels:
header.append(label)
tab = PrettyTable(header)
for field in header:
tab.align[field] = "l"
# add body
for pos in range(top):
row = [ str(pos+1) ]
for ranking in term_rankings:
# have we run out of terms?
if len(ranking) <= pos:
row.append( "" )
else:
row.append( ranking[pos] )
tab.add_row( row )
return tab
def format_term_rankings_long( term_rankings, labels = None, top = 10 ):
"""
Format a list of multiple term rankings using lists.
"""
if labels is None:
labels = []
for i in range( len(term_rankings) ):
labels.append("C%02d" % (i+1) )
max_label_len = 0
for label in labels:
max_label_len = max(max_label_len,len(label))
max_label_len += 1
s = ""
for i, label in enumerate(labels):
s += label.ljust(max_label_len)
s += ": "
sterms = ""
for term in term_rankings[i][0:top]:
if len(sterms) > 0:
sterms += ", "
sterms += term
s += sterms + "\n"
return s
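# Hedged usage sketch (not part of the original module): comparing two small
# ranking sets with the top-weighted AverageJaccard metric. The toy term lists
# are illustrative; RankingSetAgreement relies on the unsupervised.hungarian
# module imported at the top of this file.
def demo_agreement():
	rankings1 = [ ["sport", "football", "goal"], ["bank", "rate", "euro"] ]
	rankings2 = [ ["bank", "euro", "rate"], ["sport", "goal", "match"] ]
	agreement = RankingSetAgreement( AverageJaccard() )
	# Hungarian matching pairs each ranking with its best counterpart; the
	# score is the mean of the matched AverageJaccard similarities.
	print( "agreement = %.3f" % agreement.similarity( rankings1, rankings2 ) )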
|
{
"content_hash": "b7d95948446ea0cee7f8994f24f11661",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 98,
"avg_line_length": 27.5,
"alnum_prop": 0.6242424242424243,
"repo_name": "derekgreene/topic-stability",
"id": "bf19af68cd7a3602e403b5d7f7b4bbf9e747a0ae",
"size": "4950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unsupervised/rankings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "97217"
}
],
"symlink_target": ""
}
|
import six
import os
def print_arguments(args):
print('----------- Configuration Arguments -----------')
for arg, value in sorted(six.iteritems(vars(args))):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
def mkdir(path):
    # Recursively create `path` like `mkdir -p`. Stop when the path already
    # exists or when the split reaches an empty head, which would otherwise
    # recurse forever on relative paths.
    if path == '' or os.path.isdir(path):
        return
    mkdir(os.path.split(path)[0])
    os.mkdir(path)
def pos_encoding_init():
pass
def scaled_dot_product_attention():
pass
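# Hedged usage sketch (not part of the original module): `print_arguments`
# expects an argparse-style namespace; the flag names below are assumptions.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--use_gpu', action='store_true')
    print_arguments(parser.parse_args([]))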
|
{
"content_hash": "4b394c8d93f0dacc68aafba6bd60d58b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 61,
"avg_line_length": 19.44,
"alnum_prop": 0.5185185185185185,
"repo_name": "kuke/models",
"id": "9da8571f2c47b6e87219b8c579ae3a7645f6afd5",
"size": "486",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "fluid/PaddleNLP/deep_attention_matching_net/utils/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15149"
},
{
"name": "Perl",
"bytes": "2072"
},
{
"name": "Python",
"bytes": "2905007"
},
{
"name": "Shell",
"bytes": "2506531"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.auth.models import User, Group
from django.utils.translation import ugettext_lazy as _
from django.core.validators import RegexValidator
from django.conf import settings
class Repository(models.Model):
"""
Git repository
"""
# basic info
name = models.CharField(
max_length=64,
validators=[RegexValidator(regex=r'^[^\x00-\x2c\x2f\x3a-\x40\x5b-\x5e\x60\x7b-\x7f\s]+$')],
verbose_name=_('name'),
help_text=_('Name of the repository, cannot contain special characters other than hyphens.'),
)
description = models.TextField(blank=True, verbose_name=_('description'))
# owner
user = models.ForeignKey(
User,
blank=True,
null=True,
related_name='repositories',
on_delete=models.SET_NULL,
verbose_name=_('user'),
help_text=_('Owner of the repository. Repository path will be prefixed by owner\'s username.'),
)
# access control
users = models.ManyToManyField(
User,
blank=True,
verbose_name=_('users'),
help_text=_('These users have right access to the repository.'),
)
groups = models.ManyToManyField(
Group,
blank=True,
verbose_name=_('groups'),
help_text=_('Users in these groups have right access to the repository.'),
)
is_private = models.BooleanField(
default=True,
verbose_name=_('is private'),
help_text=_('Restrict read access to specified users and groups.'),
)
# meta
created = models.DateTimeField(auto_now_add=True, verbose_name=_('created'))
modified = models.DateTimeField(auto_now=True, verbose_name=_('modified'))
class Meta:
verbose_name = _('repository')
verbose_name_plural = _('repositories')
ordering = ['user', 'name']
unique_together = ['user', 'name']
def __unicode__(self):
if self.user:
return u'%s/%s' % (self.user.username, self.name)
return u'./%s' % (self.name)
def can_read(self, user):
if not user and settings.PROTECTED:
return False
if not self.is_private:
return True
return self.can_write(user)
def can_write(self, user):
if not user:
return False
if user.id == self.user_id:
return True
if self.users.filter(pk=user.id).exists():
return True
if self.groups.filter(user__pk=user.id).exists():
return True
return False
|
{
"content_hash": "cd78c01d833faaa502e374bb540c271c",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 103,
"avg_line_length": 30.341176470588234,
"alnum_prop": 0.5994571539356339,
"repo_name": "gitmill/gitmill",
"id": "7cee1128a247caa3fe76fe914ef7542e1be27c51",
"size": "2579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/repository/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27514"
},
{
"name": "CoffeeScript",
"bytes": "3686"
},
{
"name": "JavaScript",
"bytes": "2405072"
},
{
"name": "Python",
"bytes": "27896"
},
{
"name": "Shell",
"bytes": "2510"
}
],
"symlink_target": ""
}
|
from caspy.lw import Lightweight as Base
from caspy import str
class Currency(Base):
_fields = 'cur_code', 'shortcut', 'symbol', 'long_name'
class Book(Base):
_fields = 'book_id', 'name', 'created_at'
def __str__(self):
return str(self.name)
class AccountType(Base):
_fields = 'account_type', 'sign', 'credit_term', 'debit_term'
class Account(Base):
_fields = ('account_id', 'parent_id', 'name', 'path', 'book',
'account_type', 'currency', 'description')
class Transaction(Base):
_fields = ('transaction_id', 'date', 'description', 'splits')
def __init__(self, *args, **kwargs):
super(Transaction, self).__init__(*args, **kwargs)
if self.splits is None:
self.splits = []
class Split(Base):
_fields = ('split_id', 'number', 'description', 'account_id',
'status', 'amount')
|
{
"content_hash": "faa4e875560fc34ac0a50a79e9488421",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 65,
"avg_line_length": 24.555555555555557,
"alnum_prop": 0.5859728506787331,
"repo_name": "altaurog/django-caspy",
"id": "a0f4cabd34c233e28c85303637702395a915130b",
"size": "884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caspy/domain/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "564"
},
{
"name": "HTML",
"bytes": "11065"
},
{
"name": "JavaScript",
"bytes": "26452"
},
{
"name": "Python",
"bytes": "126094"
}
],
"symlink_target": ""
}
|
class Form(object):
'''
classdocs
'''
def __init__(self, action, method = 'GET'):
'''
Constructor
'''
self.fields = []
self.action = action
self.method = method
def add_select_field(self, Name, param, options, multiple = False, default = None):
"""
Create the select field.
"""
options_html = []
for opt_name, value in options.items() :
default_html = ''
if not default == None :
if opt_name == default :
default_html = "selected='selected'"
options_html.append("<option value='{:s}' {:s}> {:s} </option>".format(value, default_html, opt_name))
multiple_html = ''
if multiple == True :
multiple_html = "multiple='multiple'"
self.fields.append("<tr> <td> {:s} </td><td> <select name='{:s}' {:s}>\t {:s} \t</select> </td></tr>".format(Name, param, multiple_html, '\t'.join(options_html)))
def add_text_field(self, Name, param):
"""
Create the text field
"""
self.fields.append("<tr> <td> {:s} </td><td> <input type='text' name='{:s}'/> </td> </tr>".format(Name, param))
def render(self):
"""
Render the form as required.
"""
return """
<form action='{:s}' method='{:s}'>
<table>
{:s}
</br>
<tr> <td><input type='submit' name='Submit' /></td> </tr>
</table>
</form>
""".format(self.action, self.method, '</br>\n\t\t\t\t\t'.join(self.fields))
# Local Variables:
# mode: python
# tab-width: 8
# python-indent-offset: 4
# indent-tabs-mode: t
# End:
# ex: shiftwidth=4 tabstop=8
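# Usage sketch (not part of the original file): builds a small form and prints the
# rendered markup. The action URL, field names and option values are made-up
# illustrative data.
if __name__ == '__main__':
    demo = Form('/submit', method='POST')
    demo.add_text_field('Benchmark name', 'name')
    demo.add_select_field('Iterations', 'iterations',
                          {'One': '1', 'Five': '5', 'Ten': '10'}, default='One')
    print(demo.render())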
|
{
"content_hash": "4ff99a4789887c8d0c24d03be2ecfadb",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 171,
"avg_line_length": 28.426470588235293,
"alnum_prop": 0.4469736161407139,
"repo_name": "ankeshanand/benchmark",
"id": "8f752f59ba7ba3bbf7e0fc2b397cc77453f40e5b",
"size": "3468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/Form.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "19881"
},
{
"name": "HTML",
"bytes": "53163"
},
{
"name": "JavaScript",
"bytes": "397712"
},
{
"name": "Makefile",
"bytes": "449"
},
{
"name": "Python",
"bytes": "109917"
},
{
"name": "Shell",
"bytes": "856"
}
],
"symlink_target": ""
}
|
"""Utilities for pre-processing classification data."""
from absl import logging
from official.nlp.xlnet import data_utils
SEG_ID_A = 0
SEG_ID_B = 1
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
  batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
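# Illustrative trace (not in the original file): with max_length = 5,
# tokens_a = [1, 2, 3, 4] and tokens_b = [7, 8, 9], the loop above first pops from
# tokens_a (the longer list) and then from tokens_b, ending with
# tokens_a = [1, 2, 3] and tokens_b = [7, 8], i.e. a total length of 5.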
def convert_single_example(example_index, example, label_list, max_seq_length,
tokenize_fn, use_bert_format):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[1] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
if label_list is not None:
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenize_fn(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenize_fn(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for two [SEP] & one [CLS] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for one [SEP] & one [CLS] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:max_seq_length - 2]
tokens = []
segment_ids = []
for token in tokens_a:
tokens.append(token)
segment_ids.append(SEG_ID_A)
tokens.append(data_utils.SEP_ID)
segment_ids.append(SEG_ID_A)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(SEG_ID_B)
tokens.append(data_utils.SEP_ID)
segment_ids.append(SEG_ID_B)
if use_bert_format:
tokens.insert(0, data_utils.CLS_ID)
segment_ids.insert(0, data_utils.SEG_ID_CLS)
else:
tokens.append(data_utils.CLS_ID)
segment_ids.append(data_utils.SEG_ID_CLS)
input_ids = tokens
# The mask has 0 for real tokens and 1 for padding tokens. Only real
# tokens are attended to.
input_mask = [0] * len(input_ids)
# Zero-pad up to the sequence length.
if len(input_ids) < max_seq_length:
delta_len = max_seq_length - len(input_ids)
if use_bert_format:
input_ids = input_ids + [0] * delta_len
input_mask = input_mask + [1] * delta_len
segment_ids = segment_ids + [data_utils.SEG_ID_PAD] * delta_len
else:
input_ids = [0] * delta_len + input_ids
input_mask = [1] * delta_len + input_mask
segment_ids = [data_utils.SEG_ID_PAD] * delta_len + segment_ids
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if label_list is not None:
label_id = label_map[example.label]
else:
label_id = example.label
if example_index < 5:
logging.info("*** Example ***")
logging.info("guid: %s", (example.guid))
logging.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logging.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logging.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logging.info("label: %d (id = %d)", example.label, label_id)
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id)
return feature
|
{
"content_hash": "a781843a283c34fdd649874e0ee1068f",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 80,
"avg_line_length": 32.486486486486484,
"alnum_prop": 0.6485024958402662,
"repo_name": "tombstone/models",
"id": "64363e322633f7ae43d6ffc65c99ee1beff36827",
"size": "5497",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "official/nlp/xlnet/classifier_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1365199"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "1858048"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Python",
"bytes": "7241242"
},
{
"name": "Shell",
"bytes": "102270"
},
{
"name": "TypeScript",
"bytes": "6515"
}
],
"symlink_target": ""
}
|
"""Benchmarks for LossScaleOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.keras.mixed_precision.experimental import loss_scale_optimizer
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training.experimental import loss_scale as loss_scale_module
def _get_strategy(num_gpus):
if num_gpus > 1:
return mirrored_strategy.MirroredStrategy(
['/GPU:%d' % i for i in range(num_gpus)])
else:
return distribution_strategy_context.get_strategy() # The default strategy
class LossScaleBenchmark(test.Benchmark):
"""Benchmark for loss scaling."""
def _benchmark(self, gradient_type, num_gpus, mode, loss_scaling):
"""Benchmarks loss scaling.
We run a simple model with several scalar variables. The loss is the sum of
all variables. The model is simple because we want to measure only the
performance of loss scaling, not the performance of the model itself.
Args:
gradient_type: "optimizer" or "gradient_tape". How gradients are computed.
"optimizer" uses Optimizer.minimize. "gradient_tape" uses
GradientTape.gradient along with LossScaleOptimizer.get_scaled_loss and
LossScaleOptimizer.get_unscaled_gradients.
num_gpus: The number of GPUs to use. Must be at least 1.
mode: "eager" or "tf_function". "tf_function" causes all computations to
be wrapped in a tf.function, while "eager" runs computations eagerly.
loss_scaling: "fixed", "dynamic", or None. The type of loss scaling to
use. None means use no loss scaling, which is useful as a baseline to
see how much slower loss scaling is in comparison.
"""
ls_str = loss_scaling or 'no_loss_scaling'
name = '%s_%d_GPU_%s_%s' % (gradient_type, num_gpus, mode, ls_str)
with context.eager_mode(), _get_strategy(num_gpus).scope() as strategy:
opt = adam.Adam()
if loss_scaling == 'fixed':
loss_scale = loss_scale_module.FixedLossScale(2.)
elif loss_scaling == 'dynamic':
# Make increment_period so high that it's effectively infinite. This
# means the loss scale will never change. Any performance overhead
# from increasing/decreasing the loss scale is typically negligible
# since it happens infrequently, so we only benchmark the common case
# of the loss scale not changing.
increment_period = 1000000
loss_scale = loss_scale_module.DynamicLossScale(
initial_loss_scale=2., increment_period=increment_period)
else:
assert loss_scaling is None
loss_scale = None
if loss_scale:
opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
num_vars = 200
num_warmup_iters = 1
num_iters = 20
# By using scalar variables, we reduce overhead of the actual GPU work of
# multiplying variables, dividing gradients, and checking gradients for
# NaNs. Measuring these overheads isn't very useful as there is little we
# can do to reduce them (one such way would be to fuse dividing gradients
# and checking them for NaNs). We still have all other overheads, such as
# all-reducing the `is_finite` values and having a tf.cond or
# tf.while_loop based on whether gradients are NaNs. Currently, these
# other overheads are much more significant than the GPU work.
var_list = [
variables.Variable(i, dtype='float32') for i in range(num_vars)]
def get_loss():
return math_ops.add_n(var_list)
if gradient_type == 'gradient_tape':
if loss_scale is None:
def minimize_fn():
with backprop.GradientTape() as tape:
loss = get_loss()
grads = tape.gradient(loss, var_list)
return opt.apply_gradients(zip(grads, var_list))
else:
def minimize_fn():
with backprop.GradientTape() as tape:
loss = get_loss()
scaled_loss = opt.get_scaled_loss(loss)
scaled_grads = tape.gradient(scaled_loss, var_list)
grads = opt.get_unscaled_gradients(scaled_grads)
return opt.apply_gradients(zip(grads, var_list))
else:
assert gradient_type == 'optimizer'
def minimize_fn():
return opt.minimize(get_loss, var_list)
def run_fn():
strategy.run(minimize_fn)
if mode == 'tf_function':
run_fn = def_function.function(run_fn)
for _ in range(num_warmup_iters):
run_fn()
start = time.time()
for _ in range(num_iters):
run_fn()
end = time.time()
self.report_benchmark(iters=num_iters,
wall_time=(end - start) / num_iters, name=name)
def _gpus_to_test_with(self):
num_gpus = context.num_gpus()
gpus_to_test_with = []
if num_gpus >= 1:
gpus_to_test_with.append(1)
if num_gpus >= 2:
gpus_to_test_with.append(2)
if num_gpus >= 8:
gpus_to_test_with.append(8)
return gpus_to_test_with
def benchmark_optimizer(self):
for num_gpus in self._gpus_to_test_with():
for mode in 'eager', 'tf_function':
for loss_scaling in None, 'fixed', 'dynamic':
self._benchmark('optimizer', num_gpus, mode, loss_scaling)
def benchmark_gradient_tape(self):
for num_gpus in self._gpus_to_test_with():
for mode in 'eager', 'tf_function':
for loss_scaling in None, 'fixed', 'dynamic':
self._benchmark('gradient_tape', num_gpus, mode, loss_scaling)
if __name__ == '__main__':
test.main()
|
{
"content_hash": "af7741cd5da6fb2daba96ded1e29597f",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 85,
"avg_line_length": 40.738255033557046,
"alnum_prop": 0.6630971993410214,
"repo_name": "aldian/tensorflow",
"id": "4ebc360b973811ee1659a6c38254e3d58eab3e4a",
"size": "6759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/mixed_precision/experimental/loss_scale_benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29667924"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26424665"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373109"
}
],
"symlink_target": ""
}
|
from .forms import BugReportForm
def bug_report_form(request):
return {
"bug_report_form": BugReportForm(
initial={"source_url": request.path, "source": "wheredoivote"}
)
}
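# Usage note (not part of the original file): a context processor like the one above
# is enabled by listing its dotted path under the template engine's
# 'context_processors' option in settings.py. The exact path below is inferred from
# this module's location in the repository and should be treated as illustrative:
#
#   TEMPLATES = [{
#       'BACKEND': 'django.template.backends.django.DjangoTemplates',
#       'OPTIONS': {
#           'context_processors': [
#               # ... Django defaults ...
#               'bug_reports.context_processors.bug_report_form',
#           ],
#       },
#   }]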
|
{
"content_hash": "8f35d997bce6749577e96ff6324d16a2",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 23.444444444444443,
"alnum_prop": 0.6066350710900474,
"repo_name": "DemocracyClub/UK-Polling-Stations",
"id": "3e67c99f6142a3505babb8d07a5c02391bc0eed9",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling_stations/apps/bug_reports/context_processors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "32"
},
{
"name": "HTML",
"bytes": "85540"
},
{
"name": "JavaScript",
"bytes": "3399"
},
{
"name": "Procfile",
"bytes": "49"
},
{
"name": "Python",
"bytes": "1111337"
},
{
"name": "SCSS",
"bytes": "5742"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models.signals import post_init, post_save
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from symposion.conference.models import Conference
from symposion.sponsorship.managers import SponsorManager
# The benefits we track as individual fields on sponsors
# Names are the names in the database as defined by organizers.
# Field names are the benefit names, lowercased, with
# spaces changed to _, and with "_benefit" appended.
# Column titles are arbitrary.
# "really just care about the ones we have today: print logo, web logo, print description, web description and the ad."
BENEFITS = [
{
'name': 'Web logo',
'field_name': 'web_logo_benefit',
'column_title': _(u"Web Logo"),
}, {
'name': 'Print logo',
'field_name': 'print_logo_benefit',
'column_title': _(u"Print Logo"),
}, {
'name': 'Company Description',
'field_name': 'company_description_benefit',
'column_title': _(u"Web Desc"),
}, {
'name': 'Print Description',
'field_name': 'print_description_benefit',
'column_title': _(u"Print Desc"),
}
]
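# Illustrative helper (not part of the original module and unused by it): the
# 'field_name' values above follow the rule described in the comment block, e.g.
# _benefit_field_name("Print logo") == "print_logo_benefit".
def _benefit_field_name(name):
    return name.lower().replace(" ", "_") + "_benefit"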
@python_2_unicode_compatible
class SponsorLevel(models.Model):
conference = models.ForeignKey(Conference, verbose_name=_("Conference"))
name = models.CharField(_("Name"), max_length=100)
order = models.IntegerField(_("Order"), default=0)
cost = models.PositiveIntegerField(_("Cost"))
description = models.TextField(_("Description"), blank=True, help_text=_("This is private."))
available = models.BooleanField(_("Available?"), default=True)
class Meta:
ordering = ["conference", "order"]
verbose_name = _("Sponsor level")
verbose_name_plural = _("Sponsor levels")
def __str__(self):
return "%s %s" % (self.conference, self.name)
def sponsors(self):
return self.sponsor_set.filter(active=True).order_by("added")
class IndividualSponsor(models.Model):
name = models.CharField(_("Sponsor Name"), max_length=100)
display_name = models.CharField(_("Display Name"), max_length=100, blank=True)
is_anonymous = models.BooleanField(_("Anonymous?"), default=False)
amount = models.IntegerField(_("Donation Amount"), default=50)
added = models.DateTimeField(_("added"), default=datetime.datetime.now)
def __str__(self):
return self.name
class Meta:
ordering = ['-amount', 'added']
@python_2_unicode_compatible
class Sponsor(models.Model):
applicant = models.ForeignKey(User, related_name="sponsorships", verbose_name=_("Applicant"),
null=True)
name = models.CharField(_("Sponsor Name"), max_length=100)
display_url = models.URLField(_("display URL"), blank=True)
external_url = models.URLField(_("External URL"))
annotation = models.TextField(_("Annotation"), blank=True)
contact_name = models.CharField(_("Contact Name"), max_length=100)
contact_email = models.EmailField(_("Contact Email"))
level = models.ForeignKey(SponsorLevel, verbose_name=_("level"))
added = models.DateTimeField(_("added"), default=datetime.datetime.now)
active = models.BooleanField(_("active"), default=False)
# Denormalization (this assumes only one logo)
sponsor_logo = models.ForeignKey("SponsorBenefit", related_name="+", null=True, blank=True,
editable=False, verbose_name=_("Sponsor logo"))
# Whether things are complete
    # True = complete, False = incomplete, Null = n/a for this sponsor level
web_logo_benefit = models.NullBooleanField(_("Web logo benefit"), help_text=_(u"Web logo benefit is complete"))
print_logo_benefit = models.NullBooleanField(_("Print logo benefit"), help_text=_(u"Print logo benefit is complete"))
print_description_benefit = models.NullBooleanField(_("Print description benefit"), help_text=_(u"Print description benefit is complete"))
company_description_benefit = models.NullBooleanField(_("Company description benefit"), help_text=_(u"Company description benefit is complete"))
objects = SponsorManager()
def __str__(self):
return self.name
class Meta:
verbose_name = _("Sponsor")
verbose_name_plural = _("Sponsors")
ordering = ['name']
def save(self, *args, **kwargs):
# Set fields related to benefits being complete
for benefit in BENEFITS:
field_name = benefit['field_name']
benefit_name = benefit['name']
setattr(self, field_name, self.benefit_is_complete(benefit_name))
super(Sponsor, self).save(*args, **kwargs)
def get_absolute_url(self):
if self.active:
return reverse("sponsor_detail", kwargs={"pk": self.pk})
return reverse("sponsor_list")
def get_display_url(self):
if self.display_url:
return self.display_url
else:
return self.external_url
@property
def twitter_handle(self):
twitter_benefit = self.sponsor_benefits.filter(benefit__name="Twitter")
if twitter_benefit.count():
return twitter_benefit[0].text.lstrip('@')
else:
return None
@property
def website_logo(self):
if self.sponsor_logo is None:
benefits = self.sponsor_benefits.filter(
benefit__type="weblogo", upload__isnull=False)[:1]
if benefits.count():
if benefits[0].upload:
self.sponsor_logo = benefits[0]
self.save()
return self.sponsor_logo.upload
@property
def listing_text(self):
if not hasattr(self, "_listing_text"):
self._listing_text = ""
# @@@ better than hard-coding a pk but still not good
benefits = self.sponsor_benefits.filter(benefit__name="Sponsor Description")
if benefits.count():
self._listing_text = benefits[0].text
return self._listing_text
def reset_benefits(self):
"""
Reset all benefits for this sponsor to the defaults for their
sponsorship level.
"""
level = None
try:
level = self.level
except SponsorLevel.DoesNotExist:
pass
allowed_benefits = []
if level:
for benefit_level in level.benefit_levels.all():
# Create all needed benefits if they don't exist already
sponsor_benefit, created = SponsorBenefit.objects.get_or_create(
sponsor=self, benefit=benefit_level.benefit)
# and set to default limits for this level.
sponsor_benefit.max_words = benefit_level.max_words
sponsor_benefit.other_limits = benefit_level.other_limits
# and set to active
sponsor_benefit.active = True
# @@@ We don't call sponsor_benefit.clean here. This means
# that if the sponsorship level for a sponsor is adjusted
# downwards, an existing too-long text entry can remain,
# and won't raise a validation error until it's next
# edited.
sponsor_benefit.save()
allowed_benefits.append(sponsor_benefit.pk)
# Any remaining sponsor benefits that don't normally belong to
# this level are set to inactive
self.sponsor_benefits.exclude(pk__in=allowed_benefits)\
.update(active=False, max_words=None, other_limits="")
def send_coordinator_emails(self):
pass # @@@ should this just be done centrally?
def benefit_is_complete(self, name):
"""Return True - benefit is complete, False - benefit is not complete,
or None - benefit not applicable for this sponsor's level """
if BenefitLevel.objects.filter(level=self.level, benefit__name=name).exists():
try:
benefit = self.sponsor_benefits.get(benefit__name=name)
except SponsorBenefit.DoesNotExist:
return False
else:
return benefit.is_complete
else:
return None # Not an applicable benefit for this sponsor's level
def _store_initial_level(sender, instance, **kwargs):
if instance:
instance._initial_level_id = instance.level_id
post_init.connect(_store_initial_level, sender=Sponsor)
def _check_level_change(sender, instance, created, **kwargs):
if instance and (created or instance.level_id != instance._initial_level_id):
instance.reset_benefits()
post_save.connect(_check_level_change, sender=Sponsor)
BENEFIT_TYPE_CHOICES = [
("text", _("Text")),
("file", _("File")),
("richtext", _("Rich Text")),
("weblogo", _("Web Logo")),
("simple", _("Simple")),
("option", _("Option"))
]
CONTENT_TYPE_CHOICES = [
("simple", "Simple"),
] + [
("listing_text_%s" % lang, "Listing Text (%s)" % label) for lang, label in settings.LANGUAGES
]
@python_2_unicode_compatible
class Benefit(models.Model):
name = models.CharField(_("Name"), max_length=100)
description = models.TextField(_("Description"), blank=True)
type = models.CharField(_("Type"), choices=BENEFIT_TYPE_CHOICES, max_length=10,
default="simple")
content_type = models.CharField(_("content type"), choices=CONTENT_TYPE_CHOICES,
max_length=20, default="simple")
def __str__(self):
return self.name
@python_2_unicode_compatible
class BenefitLevel(models.Model):
benefit = models.ForeignKey(Benefit, related_name="benefit_levels", verbose_name=_("Benefit"))
level = models.ForeignKey(SponsorLevel, related_name="benefit_levels", verbose_name=_("Level"))
# default limits for this benefit at given level
max_words = models.PositiveIntegerField(_("Max words"), blank=True, null=True)
other_limits = models.CharField(_("Other limits"), max_length=200, blank=True)
class Meta:
ordering = ["level"]
verbose_name = _("Benefit level")
verbose_name_plural = _("Benefit levels")
def __str__(self):
return "%s - %s" % (self.level, self.benefit)
@python_2_unicode_compatible
class SponsorBenefit(models.Model):
sponsor = models.ForeignKey(Sponsor, related_name="sponsor_benefits", verbose_name=_("Sponsor"))
benefit = models.ForeignKey(Benefit, related_name="sponsor_benefits", verbose_name=_("Benefit"))
active = models.BooleanField(default=True, verbose_name=_("Active"))
# Limits: will initially be set to defaults from corresponding BenefitLevel
max_words = models.PositiveIntegerField(_("Max words"), blank=True, null=True)
other_limits = models.CharField(_("Other limits"), max_length=200, blank=True)
# Data: zero or one of these fields will be used, depending on the
# type of the Benefit (text, file, or simple)
text = models.TextField(_("Text"), blank=True)
upload = models.FileField(_("File"), blank=True, upload_to="sponsor_files")
# Whether any assets required from the sponsor have been provided
# (e.g. a logo file for a Web logo benefit).
is_complete = models.NullBooleanField(_("Complete?"), help_text=_(u"True - benefit complete; False - benefit incomplete; Null - n/a"))
class Meta:
ordering = ["-active"]
verbose_name = _("Sponsor benefit")
verbose_name_plural = _("Sponsor benefits")
def __str__(self):
return "%s - %s (%s)" % (self.sponsor, self.benefit, self.benefit.type)
def save(self, *args, **kwargs):
# Validate - save() doesn't clean your model by default, so call
# it explicitly before saving
self.full_clean()
self.is_complete = self._is_complete()
super(SponsorBenefit, self).save(*args, **kwargs)
def clean(self):
num_words = len(self.text.split())
if self.max_words and num_words > self.max_words:
raise ValidationError(
_("Sponsorship level only allows for %(word)s words, you provided %(num)d.") % {
"word": self.max_words, "num": num_words})
def data_fields(self):
"""
Return list of data field names which should be editable for
this ``SponsorBenefit``, depending on its ``Benefit`` type.
"""
if self.benefit.type == "file" or self.benefit.type == "weblogo":
return ["upload"]
elif self.benefit.type in ("text", "richtext", "simple", "option"):
return ["text"]
return []
def _is_text_benefit(self):
return self.benefit.type in ["text", "richtext", "simple"] and bool(self.text)
def _is_upload_benefit(self):
return self.benefit.type in ["file", "weblogo"] and bool(self.upload)
def _is_complete(self):
return self.active and (self._is_text_benefit() or self._is_upload_benefit())
def _denorm_weblogo(sender, instance, created, **kwargs):
if instance:
if instance.benefit.type == "weblogo" and instance.upload:
sponsor = instance.sponsor
sponsor.sponsor_logo = instance
sponsor.save()
post_save.connect(_denorm_weblogo, sender=SponsorBenefit)
|
{
"content_hash": "77e8cb44656918cb65ed28380a634225",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 148,
"avg_line_length": 38.25561797752809,
"alnum_prop": 0.6313238857478523,
"repo_name": "pyohio/symposion",
"id": "1972b8758ee75c86975edb17037f910f20be51de",
"size": "13619",
"binary": false,
"copies": "1",
"ref": "refs/heads/pyohio-2019",
"path": "symposion/sponsorship/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "79954"
},
{
"name": "Python",
"bytes": "287626"
}
],
"symlink_target": ""
}
|
from src.core.page import Page, ResourceLoader, iOSPage
from src.core.r import Resource
class MeFindFriendsSearch(Page, iOSPage):
followUserBtn = ResourceLoader(Resource.followUserBtn)
unfollowUserBtn = ResourceLoader(Resource.unfollowUserBtn)
searchUsersBtn = ResourceLoader(Resource.searchUsersBtn)
runSearchBtn = ResourceLoader(Resource.runSearchBtn)
def __init__(self, box, settings):
super(MeFindFriendsSearch, self).__init__(box, settings)
self.box = box
self.settings = settings
self.followUserBtn = self.box
self.unfollowUserBtn = self.box
self.searchUsersBtn = self.box
self.checkIfLoaded(['searchUsersBtn'])
def actionFindFriend(self, friendName):
self.searchUsersBtn.click()
self.inputText(self, friendName)
self.runSearchBtn.click()
self.waitPageLoad()
return self
def followFirstFriend(self):
self.followUserBtn.click()
return self
def unfollowFirstFriend(self):
self.unfollowUserBtn.click()
return self
class MeFindFriendsSearchiOS(MeFindFriendsSearch):
pass
|
{
"content_hash": "de506238f52cccb25f8e3b4b869b8603",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 64,
"avg_line_length": 28.78048780487805,
"alnum_prop": 0.6779661016949152,
"repo_name": "azoft-dev-team/imagrium",
"id": "0ef26ccdb454479d02f303001bae5c27646429a0",
"size": "1180",
"binary": false,
"copies": "1",
"ref": "refs/heads/win",
"path": "src/pages/me/me_find_friends_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22116"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "111703"
},
{
"name": "Java",
"bytes": "448343"
},
{
"name": "Python",
"bytes": "14076342"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Ruby",
"bytes": "5269"
},
{
"name": "Shell",
"bytes": "3193"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
from cs231n.layers import *
from cs231n.fast_layers import *
def affine_relu_forward(x, w, b):
"""
Convenience layer that perorms an affine transform followed by a ReLU
Inputs:
- x: Input to the affine layer
- w, b: Weights for the affine layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, fc_cache = affine_forward(x, w, b)
out, relu_cache = relu_forward(a)
cache = (fc_cache, relu_cache)
return out, cache
def affine_relu_backward(dout, cache):
"""
Backward pass for the affine-relu convenience layer
"""
fc_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
dx, dw, db = affine_backward(da, fc_cache)
return dx, dw, db
def conv_relu_forward(x, w, b, conv_param):
"""
A convenience layer that performs a convolution followed by a ReLU.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
Returns a tuple of:
- out: Output from the ReLU
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
out, relu_cache = relu_forward(a)
cache = (conv_cache, relu_cache)
return out, cache
def conv_relu_backward(dout, cache):
"""
Backward pass for the conv-relu convenience layer.
"""
conv_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
dx, dw, db = conv_backward_fast(da, conv_cache)
return dx, dw, db
def conv_relu_pool_forward(x, w, b, conv_param, pool_param):
"""
Convenience layer that performs a convolution, a ReLU, and a pool.
Inputs:
- x: Input to the convolutional layer
- w, b, conv_param: Weights and parameters for the convolutional layer
- pool_param: Parameters for the pooling layer
Returns a tuple of:
- out: Output from the pooling layer
- cache: Object to give to the backward pass
"""
a, conv_cache = conv_forward_fast(x, w, b, conv_param)
s, relu_cache = relu_forward(a)
out, pool_cache = max_pool_forward_fast(s, pool_param)
cache = (conv_cache, relu_cache, pool_cache)
return out, cache
def conv_relu_pool_backward(dout, cache):
"""
Backward pass for the conv-relu-pool convenience layer
"""
conv_cache, relu_cache, pool_cache = cache
ds = max_pool_backward_fast(dout, pool_cache)
da = relu_backward(ds, relu_cache)
dx, dw, db = conv_backward_fast(da, conv_cache)
return dx, dw, db
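# Minimal usage sketch (not part of the original file), assuming the cs231n package
# is importable so the star-imports at the top of this module resolve: run a
# forward/backward pass through the affine-ReLU convenience layer with random data
# and check that the returned gradients match the input shapes.
if __name__ == '__main__':
  import numpy as np
  x = np.random.randn(4, 6)   # 4 examples, 6 input features
  w = np.random.randn(6, 5)   # affine weights
  b = np.random.randn(5)      # affine biases
  out, cache = affine_relu_forward(x, w, b)
  dx, dw, db = affine_relu_backward(np.random.randn(*out.shape), cache)
  assert dx.shape == x.shape and dw.shape == w.shape and db.shape == b.shape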
|
{
"content_hash": "52670dbc2a5afd115cfa7d88190dd249",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 72,
"avg_line_length": 27.08888888888889,
"alnum_prop": 0.6833470057424118,
"repo_name": "5hubh4m/CS231n",
"id": "8baa9627ec5dc4632d80947cc6913799baca5d6b",
"size": "2438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Assignment2/cs231n/layer_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9191840"
},
{
"name": "Python",
"bytes": "214685"
},
{
"name": "Shell",
"bytes": "540"
}
],
"symlink_target": ""
}
|
from unittest import skipIf
import os
from django.db import IntegrityError
from django.test import TransactionTestCase
from django_migration_testcase import MigrationTest
EXAMPLE_CORRUPTION = bool(os.environ.get('EXAMPLE_CORRUPTION', False))
class Test0002AwesomeModelUniqueName(MigrationTest):
app_name = 'testing_migrations'
before = '0001_initial'
after = '0002_awesome_model_unique_name'
@skipIf(not EXAMPLE_CORRUPTION, "This test corrupts the migration state of the tests.")
def test_migration(self):
awesome_model_model = self.get_model_before('testing_migrations.AwesomeModel')
awesome_model_model.objects.create(name="foo")
awesome_model_model.objects.create(name="foo")
with self.assertRaisesMessage(IntegrityError, "UNIQUE constraint failed: testing_migrations_awesomemodel.name"):
self.run_migration()
class Test9999AnotherTransactionTest(TransactionTestCase):
"""
This test use an `TransactionTestCase` for this demo since they are run at the same time as `MigrationTestCase`.
This test will fail if the other test is run.
"""
def test_this_factory_is_supposed_to_work(self):
from .factories import KittenNameFactory
kitten = KittenNameFactory(name="foo")
self.assertTrue(kitten.name, "foo")
|
{
"content_hash": "684771d79c8cc5d42956e6e85704e4a9",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 120,
"avg_line_length": 39.96969696969697,
"alnum_prop": 0.7376800606520091,
"repo_name": "jrobichaud/django-awesome-presentations",
"id": "a389d76695b3a22d4bd71438cca69046001918be",
"size": "1319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing_migrations/test_debug_hell.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29582"
}
],
"symlink_target": ""
}
|
import mock
import pytest
import unittest
from addons.base.tests.models import (OAuthAddonNodeSettingsTestSuiteMixin,
OAuthAddonUserSettingTestSuiteMixin)
from addons.github.models import NodeSettings
from addons.github.tests import factories
from osf_tests.factories import ProjectFactory, UserFactory
from nose.tools import (assert_equal, assert_false, assert_in, assert_is,
assert_not_equal, assert_not_in, assert_true)
from github3 import GitHubError
from github3.repos import Repository
from tests.base import OsfTestCase, get_default_metaschema
from framework.auth import Auth
from addons.github.exceptions import NotFoundError
from .utils import create_mock_github
mock_github = create_mock_github()
pytestmark = pytest.mark.django_db
class TestNodeSettings(OAuthAddonNodeSettingsTestSuiteMixin, unittest.TestCase):
short_name = 'github'
full_name = 'GitHub'
ExternalAccountFactory = factories.GitHubAccountFactory
NodeSettingsFactory = factories.GitHubNodeSettingsFactory
NodeSettingsClass = NodeSettings
UserSettingsFactory = factories.GitHubUserSettingsFactory
## Mixin Overrides ##
def _node_settings_class_kwargs(self, node, user_settings):
return {
'user_settings': self.user_settings,
'repo': 'mock',
'user': 'abc',
'owner': self.node
}
def test_set_folder(self):
# GitHub doesn't use folderpicker, and the nodesettings model
# does not need a `set_repo` method
pass
def test_serialize_settings(self):
# GitHub's serialized_settings are a little different from
# common storage addons.
settings = self.node_settings.serialize_waterbutler_settings()
expected = {'owner': self.node_settings.user, 'repo': self.node_settings.repo}
assert_equal(settings, expected)
@mock.patch(
'addons.github.models.UserSettings.revoke_remote_oauth_access',
mock.PropertyMock()
)
def test_complete_has_auth_not_verified(self):
super(TestNodeSettings, self).test_complete_has_auth_not_verified()
@mock.patch('addons.github.api.GitHubClient.repos')
@mock.patch('addons.github.api.GitHubClient.my_org_repos')
def test_to_json(self, mock_org, mock_repos):
mock_repos.return_value = {}
mock_org.return_value = {}
super(TestNodeSettings, self).test_to_json()
@mock.patch('addons.github.api.GitHubClient.repos')
@mock.patch('addons.github.api.GitHubClient.my_org_repos')
def test_to_json_user_is_owner(self, mock_org, mock_repos):
mock_repos.return_value = {}
mock_org.return_value = {}
result = self.node_settings.to_json(self.user)
assert_true(result['user_has_auth'])
assert_equal(result['github_user'], 'abc')
assert_true(result['is_owner'])
assert_true(result['valid_credentials'])
assert_equal(result.get('repo_names', None), [])
@mock.patch('addons.github.api.GitHubClient.repos')
@mock.patch('addons.github.api.GitHubClient.my_org_repos')
def test_to_json_user_is_not_owner(self, mock_org, mock_repos):
mock_repos.return_value = {}
mock_org.return_value = {}
not_owner = UserFactory()
result = self.node_settings.to_json(not_owner)
assert_false(result['user_has_auth'])
assert_equal(result['github_user'], 'abc')
assert_false(result['is_owner'])
assert_true(result['valid_credentials'])
assert_equal(result.get('repo_names', None), None)
class TestUserSettings(OAuthAddonUserSettingTestSuiteMixin, unittest.TestCase):
short_name = 'github'
full_name = 'GitHub'
ExternalAccountFactory = factories.GitHubAccountFactory
def test_public_id(self):
assert_equal(self.user.external_accounts.first().display_name, self.user_settings.public_id)
class TestCallbacks(OsfTestCase):
def setUp(self):
super(TestCallbacks, self).setUp()
self.project = ProjectFactory()
self.consolidated_auth = Auth(self.project.creator)
self.project.creator.save()
self.non_authenticator = UserFactory()
self.non_authenticator.save()
self.project.save()
self.project.add_contributor(
contributor=self.non_authenticator,
auth=self.consolidated_auth,
)
self.project.add_addon('github', auth=self.consolidated_auth)
self.project.creator.add_addon('github')
self.external_account = factories.GitHubAccountFactory()
self.project.creator.external_accounts.add(self.external_account)
self.project.creator.save()
self.node_settings = self.project.get_addon('github')
self.user_settings = self.project.creator.get_addon('github')
self.node_settings.user_settings = self.user_settings
self.node_settings.user = 'Queen'
self.node_settings.repo = 'Sheer-Heart-Attack'
self.node_settings.external_account = self.external_account
self.node_settings.save()
self.node_settings.set_auth
@mock.patch('addons.github.api.GitHubClient.repo')
def test_before_make_public(self, mock_repo):
mock_repo.side_effect = NotFoundError
result = self.node_settings.before_make_public(self.project)
assert_is(result, None)
@mock.patch('addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_public_gh_public(self, mock_repo):
self.project.is_public = True
self.project.save()
mock_repo.return_value = Repository.from_json({'private': False})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_false(message)
@mock.patch('addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_public_gh_private(self, mock_repo):
self.project.is_public = True
self.project.save()
mock_repo.return_value = Repository.from_json({'private': True})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_true(message)
@mock.patch('addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_private_gh_public(self, mock_repo):
mock_repo.return_value = Repository.from_json({'private': False})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_true(message)
@mock.patch('addons.github.api.GitHubClient.repo')
def test_before_page_load_osf_private_gh_private(self, mock_repo):
mock_repo.return_value = Repository.from_json({'private': True})
message = self.node_settings.before_page_load(self.project, self.project.creator)
mock_repo.assert_called_with(
self.node_settings.user,
self.node_settings.repo,
)
assert_false(message)
def test_before_page_load_not_contributor(self):
message = self.node_settings.before_page_load(self.project, UserFactory())
assert_false(message)
def test_before_page_load_not_logged_in(self):
message = self.node_settings.before_page_load(self.project, None)
assert_false(message)
def test_before_remove_contributor_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.project.creator
)
assert_true(message)
def test_before_remove_contributor_not_authenticator(self):
message = self.node_settings.before_remove_contributor(
self.project, self.non_authenticator
)
assert_false(message)
def test_after_remove_contributor_authenticator_self(self):
message = self.node_settings.after_remove_contributor(
self.project, self.project.creator, self.consolidated_auth
)
assert_equal(
self.node_settings.user_settings,
None
)
assert_true(message)
assert_not_in('You can re-authenticate', message)
def test_after_remove_contributor_authenticator_not_self(self):
auth = Auth(user=self.non_authenticator)
message = self.node_settings.after_remove_contributor(
self.project, self.project.creator, auth
)
assert_equal(
self.node_settings.user_settings,
None
)
assert_true(message)
assert_in('You can re-authenticate', message)
def test_after_remove_contributor_not_authenticator(self):
self.node_settings.after_remove_contributor(
self.project, self.non_authenticator, self.consolidated_auth
)
assert_not_equal(
self.node_settings.user_settings,
None,
)
def test_after_fork_authenticator(self):
fork = ProjectFactory()
clone, message = self.node_settings.after_fork(
self.project, fork, self.project.creator,
)
assert_equal(
self.node_settings.user_settings,
clone.user_settings,
)
def test_after_fork_not_authenticator(self):
fork = ProjectFactory()
clone, message = self.node_settings.after_fork(
self.project, fork, self.non_authenticator,
)
assert_equal(
clone.user_settings,
None,
)
def test_after_delete(self):
self.project.remove_node(Auth(user=self.project.creator))
# Ensure that changes to node settings have been saved
self.node_settings.reload()
assert_true(self.node_settings.user_settings is None)
@mock.patch('website.archiver.tasks.archive')
def test_does_not_get_copied_to_registrations(self, mock_archive):
registration = self.project.register_node(
schema=get_default_metaschema(),
auth=Auth(user=self.project.creator),
data='hodor',
)
assert_false(registration.has_addon('github'))
class TestGithubNodeSettings(unittest.TestCase):
def setUp(self):
super(TestGithubNodeSettings, self).setUp()
self.user = UserFactory()
self.user.add_addon('github')
self.user_settings = self.user.get_addon('github')
self.external_account = factories.GitHubAccountFactory()
self.user_settings.owner.external_accounts.add(self.external_account)
self.user_settings.owner.save()
self.node_settings = factories.GitHubNodeSettingsFactory(user_settings=self.user_settings)
@mock.patch('addons.github.api.GitHubClient.delete_hook')
def test_delete_hook(self, mock_delete_hook):
self.node_settings.hook_id = 'hook'
self.node_settings.save()
args = (
self.node_settings.user,
self.node_settings.repo,
self.node_settings.hook_id,
)
res = self.node_settings.delete_hook()
assert_true(res)
mock_delete_hook.assert_called_with(*args)
@mock.patch('addons.github.api.GitHubClient.delete_hook')
def test_delete_hook_no_hook(self, mock_delete_hook):
res = self.node_settings.delete_hook()
assert_false(res)
assert_false(mock_delete_hook.called)
@mock.patch('addons.github.api.GitHubClient.delete_hook')
def test_delete_hook_not_found(self, mock_delete_hook):
self.node_settings.hook_id = 'hook'
self.node_settings.save()
mock_delete_hook.side_effect = NotFoundError
args = (
self.node_settings.user,
self.node_settings.repo,
self.node_settings.hook_id,
)
res = self.node_settings.delete_hook()
assert_false(res)
mock_delete_hook.assert_called_with(*args)
@mock.patch('addons.github.api.GitHubClient.delete_hook')
def test_delete_hook_error(self, mock_delete_hook):
self.node_settings.hook_id = 'hook'
self.node_settings.save()
mock_delete_hook.side_effect = GitHubError(mock.Mock())
args = (
self.node_settings.user,
self.node_settings.repo,
self.node_settings.hook_id,
)
res = self.node_settings.delete_hook()
assert_false(res)
mock_delete_hook.assert_called_with(*args)
|
{
"content_hash": "e77aa49b862add7343b82e8b29eabade",
"timestamp": "",
"source": "github",
"line_count": 336,
"max_line_length": 100,
"avg_line_length": 37.68154761904762,
"alnum_prop": 0.6534239001658637,
"repo_name": "mluo613/osf.io",
"id": "54118a7d76be076401d129ce1e27250a9dbe31d5",
"size": "12686",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "addons/github/tests/test_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "176516"
},
{
"name": "HTML",
"bytes": "181210"
},
{
"name": "JavaScript",
"bytes": "2015658"
},
{
"name": "Jupyter Notebook",
"bytes": "19626"
},
{
"name": "Mako",
"bytes": "748050"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "8492180"
},
{
"name": "Shell",
"bytes": "379"
}
],
"symlink_target": ""
}
|
"""
sdata.py
this file contains a class sdata which stores sparse data in a tree like
structure that mimics an n dimensional array.
"""
import cPickle
class sdata:
def __init__(self, shape=(1,)):
self.shape = shape
self._data = []
self._indice = []
def save(self, fname):
with open(fname, 'ab') as data_file:
pickler = cPickle.Pickler(data_file, -1)
pickler.dump(self.shape)
pickler.dump(self._indice)
for _ in xrange(len(self._indice)):
with open(fname, 'ab') as data_file:
pickler = cPickle.Pickler(data_file,-1)
pickler.dump(self._data[0])
del self._data[0]
def load(self, unpickler):
self.shape = unpickler.load()
self._indice = unpickler.load()
self._data = [0]*len(self._indice)
for i in xrange(len(self._indice)):
self._data[i] = unpickler.load()
def _normalize(self, valid_chars, first=True):
if len(self.shape) != 1:
if first:
                print "0.0 %"
for i, sub_array in enumerate(self._data):
sub_array._normalize(valid_chars, False)
if first:
percentage = 100*(i + 1)/float(len(valid_chars))
print ("%.1f"%percentage) + " %"
if first:
print "\n"
else:
total = sum(self._data)
if total !=0:
self._data = [value / float(total) for value in self._data]
def __len__(self):
return self.shape[0]
def __str__(self):
string = ", ".join([str(data) for data in self])
return "[" + string + "]"
def __getitem__(self, key):
try:
if len(key) == 1:
key = key[0]
except TypeError:
pass
try:
index = key[0]
key_pass = key[1:]
if index < 0:
index = index % len(self)
if (index >= len(self)) or (len(key) > len(self.shape)):
raise IndexError("sdata index out of range")
if index in self._indice:
id = self._indice.index(index)
return self._data[id][key_pass]
else:
tmp_shape = self.shape
while (len(key) != 0):
tmp_shape = tmp_shape[1:]
index = key[0]
key = key[1:]
if index < 0:
index = index % len(self)
if index >= len(self):
raise IndexError("sdata index out of range")
if len(tmp_shape) != 0:
return sdata(tmp_shape)
else:
return 0
except TypeError:
if key < 0:
key = key % len(self)
if key >= len(self):
raise IndexError("sdata index out of range")
if key in self._indice:
id = self._indice.index(key)
return self._data[id]
else:
if len(self.shape) != 1:
return sdata(self.shape[1:])
else:
return 0
def __setitem__(self, key, value):
try:
if len(key) == 1:
key = key[0]
except TypeError:
pass
try:
index = key[0]
key_pass = key[1:]
if index < 0:
index = index % len(self)
if (index >= len(self)) or (len(key) > len(self.shape)):
raise IndexError("sdata index out of range")
if index in self._indice:
id = self._indice.index(index)
self._data[id][key_pass] = value
else:
self._indice.append(index)
self._data.append(sdata(self.shape[1:]))
self._data[-1][key_pass] = value
except TypeError:
if key < 0:
key = key % len(self)
if key >= len(self):
raise IndexError("sdata index out of range")
if key in self._indice:
id = self._indice.index(key)
self._data[id] = value
else:
self._indice.append(key)
self._data.append(value)
def __iter__(self):
return sdata_iter(self)
class sdata_iter:
def __init__(self, sdata_obj):
self.sdata_obj = sdata_obj
self.index = 0
def __iter__(self):
return self
def next(self):
try:
data = self.sdata_obj[self.index]
self.index += 1
return data
except IndexError:
raise StopIteration()
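# Minimal usage sketch (not part of the original file): build a sparse 3x4
# structure, store two values, and read back both stored and unset entries.
if __name__ == "__main__":
    counts = sdata(shape=(3, 4))
    counts[0, 1] = 2.0
    counts[2, 3] = 5.0
    print counts[0, 1]   # 2.0 (stored value)
    print counts[1, 2]   # 0 (entries that were never set default to 0)
    print counts         # nested list-style view with the zeros filled in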
|
{
"content_hash": "1ed4e0ef19a5b2d7aba4eeec62c452af",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 75,
"avg_line_length": 30.401234567901234,
"alnum_prop": 0.44243654822335027,
"repo_name": "lsiemens/iprocess-projects",
"id": "a394e6ffeb0cf75fded63bb5383f8b3dd8069fec",
"size": "6485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TextGen_1.2/sdata.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "32324"
},
{
"name": "Python",
"bytes": "61696"
}
],
"symlink_target": ""
}
|
import random
import sys
import gdb
# List of colors used for printing messages
RED = "\033[1;31m"
BLUE = "\033[1;34m"
CYAN = "\033[1;36m"
GREEN = "\033[0;32m"
RESET = "\033[0;0m"
BOLD = "\033[;1m"
REVERSE = "\033[;7m"
def error_output(message):
    """Print out a red message."""
    sys.stdout.write(BOLD + RED)
    print(message)
    sys.stdout.write(RESET)
def warning_output(message):
    """Print out a cyan message."""
    sys.stdout.write(BOLD + CYAN)
    print(message)
    sys.stdout.write(RESET)
def positive_output(message):
    """Print out a green message."""
    sys.stdout.write(BOLD + GREEN)
    print(message)
    sys.stdout.write(RESET)
def neutral_output(message):
    """Print out a blue message."""
    sys.stdout.write(BLUE)
    print(message)
    sys.stdout.write(RESET)
def parse_input_file(inputfile_name):
""" Read in the inputfile and parse it.
    Then return a tuple of the parsed settings."""
with open(inputfile_name) as inputfile:
for line in inputfile:
stripped_line = line.strip()
# Ignore commented lines
if not stripped_line.startswith("#"):
if stripped_line.startswith("executable"):
# Get the path of the executable
# for fault injection experiment
try:
filename = stripped_line.split()[1]
except BaseException:
error_output(
"Please provide a valid path for your executable\n"
)
elif stripped_line.startswith("arguments"):
# Get the arguments for target program
if len(stripped_line.split()) != 1:
arguments = stripped_line.split(maxsplit=1)[1]
else:
arguments = None
elif stripped_line.startswith("fault"):
# Get the fault list for fault injection experiment
if len(stripped_line.split()) == 7:
# If the user set the scope using line number
variable = stripped_line.split()[1]
scope_begin = stripped_line.split()[2]
scope_end = stripped_line.split()[3]
scope_hit_threshold = stripped_line.split()[4]
variable_access_threshold = stripped_line.split()[5]
fault_model = stripped_line.split()[6]
elif len(stripped_line.split()) == 6:
# If the user set the scope using function name
variable = stripped_line.split()[1]
scope_begin = stripped_line.split()[2]
scope_end = None
scope_hit_threshold = stripped_line.split()[3]
variable_access_threshold = stripped_line.split()[4]
fault_model = stripped_line.split()[5]
elif len(stripped_line.split()) == 5:
# If the user want to inject a fault
# into a global variable
variable = stripped_line.split()[1]
scope_begin = None
scope_end = None
scope_hit_threshold = stripped_line.split()[2]
variable_access_threshold = stripped_line.split()[3]
fault_model = stripped_line.split()[4]
elif stripped_line.startswith("probe"):
# Get the list of variables we wish to observe
# after fault injection
if len(stripped_line.split()) == 3:
# If the probe variable is located in another function
probe_var = stripped_line.split()[1]
probe_function = stripped_line.split()[2]
elif len(stripped_line.split()) == 2:
# If the probe variable is in the same function or a global variable
probe_var = stripped_line.split()[1]
probe_function = None
else:
error_output(
"Please follow the instruction in the input file template\n"
)
gdb.execute('set confirm off')
gdb.execute('quit')
return (filename, arguments, variable, scope_begin, scope_end,
scope_hit_threshold, variable_access_threshold, fault_model,
probe_var, probe_function)
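# Illustrative input file (not shipped with this script; every value below is made
# up) that the parser above accepts. Lines starting with '#' are ignored:
#
#     executable /path/to/target_binary
#     arguments --iterations 10
#     fault counter main_loop 1 3 BIT_FLIPS
#     probe result
#
# The 'fault' line uses the function-scope form ('fault' plus variable, function
# name, scope hit threshold, variable access threshold, fault model); the 5- and
# 7-token forms cover global variables and line-number scopes instead.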
def load_target(filename, arguments):
    """Try to load the target executable, set its arguments and clone the inferior."""
try:
gdb.execute('file ' + filename)
if arguments is not None:
gdb.execute('set args ' + arguments)
gdb.execute('clone-inferior') # For comparing
return True
except gdb.error:
error_output("Error reading the target program")
return False
def run_target(r_inferior, f_inferior):
"""Try to start the program."""
try:
gdb.execute('inferior ' + str(r_inferior.num))
gdb.execute('run')
gdb.execute('inferior ' + str(f_inferior.num))
gdb.execute('run')
return True
except gdb.error:
error_output(
"Unable to run the target program with breakpoints/watchpoints set"
)
gdb.execute('delete')
gdb.execute('set confirm off')
gdb.execute('quit')
return False
def continue_target(inferior_obj):
"""Continue chosen inferior"""
try:
gdb.execute('inferior ' + str(inferior_obj.num))
gdb.execute('continue')
return True
except gdb.error:
error_output("Unable to continue the target inferior:")
error_output(str(inferior_obj.num))
return False
def set_breakpoint(location):
"""Try to set a access watchpoint on the target variable."""
try:
b = gdb.Breakpoint(location)
b.silent = True
return b
except gdb.error:
error_output("Invalid breakpoint location detected")
return None
def watch_variable(inferior_obj, variable):
"""Try to set a access watchpoint on the target variable."""
try:
gdb.execute('inferior ' + str(inferior_obj.num))
watched_variable = gdb.Breakpoint(variable, gdb.BP_WATCHPOINT,
gdb.WP_ACCESS)
watched_variable.silent = True
return watched_variable
except gdb.error:
error_output("Unable to set a watchpoint for the target variable, "
"it is optimized-out\n")
return None
def get_variable(inferior_obj, variable):
"""Try to get the value of the target variable
and return it as a string."""
try:
gdb.execute('inferior ' + str(inferior_obj.num))
return str(gdb.parse_and_eval(variable))
except gdb.error:
return None
def set_scope(scope_begin, scope_end=None):
"""Setting breakpoints at the beginning and the end of the scope
before running the program."""
if scope_end is None:
# Setting scope when function name is the beginning of the scope
neutral_output("The scope set in the function " + str(scope_begin))
b_begin = set_breakpoint(scope_begin)
b_end = None
else:
# Setting scope when scope is specified in line numbers
neutral_output("The scope starts from line " + str(scope_begin) +
" to " + str(scope_end))
b_begin = set_breakpoint(scope_begin)
b_end = set_breakpoint(scope_end)
return (b_begin, b_end)
def delete_breakpoint(bp0=None, bp1=None, bp2=None, bp3=None):
    """Delete the given breakpoints so the target program can run to completion."""
    try:
        # Delete the leading breakpoints up to the first unset (None) slot.
        for bp in (bp0, bp1, bp2, bp3):
            if bp is None:
                break
            bp.delete()
        return True
    except gdb.error:
        error_output("Unable to delete the breakpoints")
        return False
def inject(inferior_obj, variable, fault_model):
""" Whitebox fault injection."""
gdb.execute('inferior ' + str(inferior_obj.num))
try:
target_variable = gdb.parse_and_eval(variable)
except gdb.error:
error_output("Out of the scope which the target variable is valid in")
return False
if target_variable.is_optimized_out:
error_output(
"The target variable is optimized out, fault injection is unable to complete"
)
return False
if fault_model == 'BIT_FLIPS':
try:
size = target_variable.type.sizeof * 8
            error = 1 << random.randint(0, size - 1)
gdb.execute('set var ' + variable + ' = ' + variable + ' ^ ' +
str(error))
return True
except gdb.error:
error_output("Unable to perform BIT-FLIP fault injection")
return False
elif fault_model == 'INC_VALUE':
try:
gdb.execute('set var ' + variable + ' = ' + variable + ' + 1')
return True
except gdb.error:
error_output("Unable to perform INCREMENTATION fault injection")
return False
elif fault_model == 'DEC_VALUE':
try:
gdb.execute('set var ' + variable + ' = ' + variable + ' - 1')
return True
except gdb.error:
error_output("Unable to perform DECREMENTATION fault injection")
return False
elif fault_model == 'SET_ZERO':
try:
gdb.execute('set var ' + variable + ' = 0')
return True
except gdb.error:
error_output("Unable to perform SET-ZERO fault injection")
return False
else:
warning_output(
"Please set the target variable and fault_model correctly")
return False
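# --- Illustrative sketch, not used by the injection flow itself ---------------
# A pure-Python mirror of the four fault models handled by inject() above,
# showing what the corresponding gdb 'set var' commands do to an integer value.
# The helper name and the default 32-bit width are assumptions made only for
# this example.
import random
def _sketch_apply_fault_model(value, fault_model, size_in_bits=32):
    """Return `value` with one fault model applied (illustration only)."""
    if fault_model == 'BIT_FLIPS':
        # Flip one randomly chosen bit inside the value's bit width.
        return value ^ (1 << random.randint(0, size_in_bits - 1))
    elif fault_model == 'INC_VALUE':
        return value + 1
    elif fault_model == 'DEC_VALUE':
        return value - 1
    elif fault_model == 'SET_ZERO':
        return 0
    return value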
def try_injecting_in_scope(r_inferior,
f_inferior,
variable,
variable_access_threshold,
fault_model,
b_begin,
b_end=None):
"""When in the right code scope,
wait for the variable access threshold being hit
and inject fault"""
if variable_access_threshold == 0:
# If the variable access threshold is set to 0,
# inject fault immediately
# regardless whether the target variable is accessed
injection_status = inject(f_inferior, variable, fault_model)
delete_breakpoint(b_begin, b_end)
else:
watched_variable_r = watch_variable(r_inferior, variable)
watched_variable_f = watch_variable(f_inferior, variable)
if watched_variable_f is None:
injection_status = False
delete_breakpoint(b_begin, b_end)
else:
            while (watched_variable_f.is_valid() and
                   watched_variable_f.hit_count != variable_access_threshold):
neutral_output(
"Waiting for reaching the variable access threshold")
if b_end is not None and b_end.hit_count != 0:
warning_output("Scope end hit, finishing target execution")
injection_status = False
delete_breakpoint(watched_variable_r, watched_variable_f,
b_begin, b_end)
break
continue_target(r_inferior)
continue_target(f_inferior)
else:
if watched_variable_f.is_valid() is False:
injection_status = False
delete_breakpoint(b_begin, b_end)
else:
injection_status = inject(f_inferior, variable,
fault_model)
delete_breakpoint(watched_variable_r, watched_variable_f,
b_begin, b_end)
return injection_status
def injection_process(r_inferior, f_inferior, variable, scope_begin, scope_end,
scope_hit_threshold, variable_access_threshold,
fault_model):
"""The main fault injection process."""
if scope_begin is None:
neutral_output("Target global variable:")
neutral_output(variable)
# When the target variable is a global one,
# skip setting the scope and start injecting fault
watched_variable_r = watch_variable(r_inferior, variable)
watched_variable_f = watch_variable(f_inferior, variable)
run_target(r_inferior, f_inferior)
if watched_variable_f is None:
continue_target(r_inferior)
continue_target(f_inferior)
if variable_access_threshold == 0:
# If the variable access threshold is set to 0,
# inject fault immediately
# regardless whether the target variable is accessed
injection_status = inject(f_inferior, variable, fault_model)
        elif watched_variable_f is None:
            # The watchpoint could not be set, so the access threshold
            # can never be observed.
            injection_status = False
        else:
while watched_variable_f.hit_count != variable_access_threshold:
neutral_output(
"Waiting for reaching the variable access threshold")
continue_target(r_inferior)
continue_target(f_inferior)
else:
injection_status = inject(f_inferior, variable, fault_model)
delete_breakpoint(watched_variable_r, watched_variable_f)
elif scope_end is None:
neutral_output("Target function:")
positive_output(scope_begin)
neutral_output("Target variable:")
positive_output(variable)
# When the scope is specified as a function's name
(b_begin, _) = set_scope(scope_begin, scope_end)
run_target(r_inferior, f_inferior)
while b_begin.hit_count != scope_hit_threshold:
neutral_output("Waiting for reaching the scope hit threshold")
continue_target(r_inferior)
continue_target(f_inferior)
else:
injection_status = try_injecting_in_scope(
r_inferior, f_inferior, variable, variable_access_threshold,
fault_model, b_begin)
else:
neutral_output("Target scope: ")
positive_output(scope_begin)
positive_output(scope_end)
neutral_output("Target variable:")
positive_output(variable)
# When the scope is specified in line number
(b_begin, b_end) = set_scope(scope_begin, scope_end)
run_target(r_inferior, f_inferior)
while b_begin.hit_count != scope_hit_threshold:
neutral_output("Waiting for reaching the scope hit threshold")
continue_target(r_inferior)
continue_target(f_inferior)
else:
injection_status = try_injecting_in_scope(
r_inferior, f_inferior, variable, variable_access_threshold,
fault_model, b_begin, b_end)
return injection_status
def observe_outcome(r_inferior, f_inferior, probe_function, probe_var):
"""Start watching a target variable for observing fault outcome."""
pc_start = int(gdb.selected_frame().pc())
if probe_function is not None:
b = set_breakpoint(probe_function)
continue_target(r_inferior)
continue_target(f_inferior)
else:
b = None
watched_variable_r = watch_variable(r_inferior, probe_var)
watched_variable_f = watch_variable(f_inferior, probe_var)
while True:
variable_r = get_variable(r_inferior, probe_var)
variable_f = get_variable(f_inferior, probe_var)
if variable_r is None or variable_f is None:
error_output(
"Out of the scope which the probe variable is valid in")
delete_breakpoint(b)
break
elif variable_r == variable_f:
continue_target(r_inferior)
continue_target(f_inferior)
else:
pc_stop = int(gdb.selected_frame().pc())
delete_breakpoint(watched_variable_r, watched_variable_f)
delete_breakpoint(b)
break
try:
latency = pc_stop - pc_start
positive_output("Variable contaminated by the fault at:")
positive_output(hex(pc_stop))
except NameError:
return None
return (variable_r, variable_f, hex(latency))
def exit_handler(event):
"""Quit GDB as soon as the program exited. """
if hasattr(event, 'exit_code'):
neutral_output(
"-----------------------------------------------------------------"
)
gdb.execute('set confirm off')
gdb.execute('quit')
def fault_injection(input_file):
""" The main process. """
gdb.events.exited.connect(exit_handler)
# gdb.events.stop.connect(stop_handler)
(filename, arguments, variable, scope_begin, scope_end,
scope_hit_threshold, variable_access_threshold, fault_model,
     probe_var, probe_function) = parse_input_file(input_file)
scope_hit_threshold = int(scope_hit_threshold)
variable_access_threshold = int(variable_access_threshold)
# By definition, scope hit threshold cannot be 0
if scope_hit_threshold == 0:
scope_hit_threshold = 1
if load_target(filename, arguments) is False:
gdb.execute('set confirm off')
gdb.execute('quit')
(r_inferior, f_inferior) = gdb.inferiors()
if injection_process(r_inferior, f_inferior, variable, scope_begin,
scope_end, scope_hit_threshold,
variable_access_threshold, fault_model) is True:
try:
positive_output("\nFault Injection Complete\n")
# Continue running the target program till it exits
            (variable_r, variable_f, latency) = observe_outcome(
                r_inferior, f_inferior, probe_function, probe_var)
if latency is None:
neutral_output(
"\nThe probe variable is not contaminated by the injected fault"
)
gdb.execute('set confirm off')
gdb.execute('quit')
else:
positive_output("\nThe probe variable should be:")
positive_output(variable_r)
positive_output("\nThe probe variable now is:")
positive_output(variable_f)
positive_output("\nThe fault latency is:")
positive_output(latency)
gdb.execute('set confirm off')
gdb.execute('quit')
except RuntimeError:
error_output(
"Unable to finish the program after fault injection, debug?\n")
gdb.execute('set confirm off')
gdb.execute('quit')
else:
warning_output("Program ran with no faults injected\n")
continue_target(r_inferior)
continue_target(f_inferior)
gdb.execute('set pagination off')
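# One possible way to drive this script (an assumption -- the real entry point
# is not shown here): source it into a Python-enabled gdb and call
# fault_injection() with the path to an input file, e.g.
#   $ gdb -q -x observed_fault_injection.py
#   (gdb) python fault_injection('inputfile.txt')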
|
{
"content_hash": "413d543c6692c5cd85d62de90c074583",
"timestamp": "",
"source": "github",
"line_count": 522,
"max_line_length": 114,
"avg_line_length": 37.93869731800766,
"alnum_prop": 0.5621086649161785,
"repo_name": "timtian090/Playground",
"id": "a5b30a23bfe817c27597103943955f5434f3a524",
"size": "19804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GDB/observed_fault_injection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "455497"
},
{
"name": "Batchfile",
"bytes": "11990"
},
{
"name": "C",
"bytes": "28095202"
},
{
"name": "C++",
"bytes": "1140083"
},
{
"name": "CSS",
"bytes": "68761"
},
{
"name": "Coq",
"bytes": "8402"
},
{
"name": "DIGITAL Command Language",
"bytes": "234545"
},
{
"name": "Emacs Lisp",
"bytes": "481"
},
{
"name": "Forth",
"bytes": "3527"
},
{
"name": "HTML",
"bytes": "1389398"
},
{
"name": "Haskell",
"bytes": "57451"
},
{
"name": "Logos",
"bytes": "540"
},
{
"name": "M4",
"bytes": "580948"
},
{
"name": "Makefile",
"bytes": "2911831"
},
{
"name": "Objective-C",
"bytes": "1066317"
},
{
"name": "OpenEdge ABL",
"bytes": "1334"
},
{
"name": "PHP",
"bytes": "22166"
},
{
"name": "PLSQL",
"bytes": "133870"
},
{
"name": "Pawn",
"bytes": "9784"
},
{
"name": "Perl",
"bytes": "13699416"
},
{
"name": "Perl 6",
"bytes": "868681"
},
{
"name": "Prolog",
"bytes": "90000"
},
{
"name": "Python",
"bytes": "217011"
},
{
"name": "Roff",
"bytes": "605811"
},
{
"name": "Rust",
"bytes": "5300"
},
{
"name": "Scala",
"bytes": "277077"
},
{
"name": "Scilab",
"bytes": "120244"
},
{
"name": "Shell",
"bytes": "1945400"
},
{
"name": "SourcePawn",
"bytes": "3657"
},
{
"name": "Stata",
"bytes": "48808"
},
{
"name": "SystemVerilog",
"bytes": "3612098"
},
{
"name": "Tcl",
"bytes": "16765"
},
{
"name": "TeX",
"bytes": "539642"
},
{
"name": "VHDL",
"bytes": "519227"
},
{
"name": "Verilog",
"bytes": "470753"
},
{
"name": "XS",
"bytes": "1205397"
},
{
"name": "XSLT",
"bytes": "12288"
},
{
"name": "Yacc",
"bytes": "38397"
}
],
"symlink_target": ""
}
|
import time
from json import dumps, loads
import warnings
from unittest import mock
from webtest import TestApp
from six import b as b_
from six import u as u_
import webob
from pecan import Pecan, expose, abort, Request, Response
from pecan.rest import RestController
from pecan.hooks import PecanHook, HookController
from pecan.tests import PecanTestCase
class TestThreadingLocalUsage(PecanTestCase):
@property
def root(self):
class RootController(object):
@expose()
def index(self, req, resp):
assert isinstance(req, webob.BaseRequest)
assert isinstance(resp, webob.Response)
return 'Hello, World!'
@expose()
def warning(self):
return ("This should be unroutable because (req, resp) are not"
" arguments. It should raise a TypeError.")
@expose(generic=True)
def generic(self):
return ("This should be unroutable because (req, resp) are not"
" arguments. It should raise a TypeError.")
@generic.when(method='PUT')
def generic_put(self, _id):
return ("This should be unroutable because (req, resp) are not"
" arguments. It should raise a TypeError.")
return RootController
def test_locals_are_not_used(self):
with mock.patch('threading.local', side_effect=AssertionError()):
app = TestApp(Pecan(self.root(), use_context_locals=False))
r = app.get('/')
assert r.status_int == 200
assert r.body == b_('Hello, World!')
self.assertRaises(AssertionError, Pecan, self.root)
def test_threadlocal_argument_warning(self):
with mock.patch('threading.local', side_effect=AssertionError()):
app = TestApp(Pecan(self.root(), use_context_locals=False))
self.assertRaises(
TypeError,
app.get,
'/warning/'
)
def test_threadlocal_argument_warning_on_generic(self):
with mock.patch('threading.local', side_effect=AssertionError()):
app = TestApp(Pecan(self.root(), use_context_locals=False))
self.assertRaises(
TypeError,
app.get,
'/generic/'
)
def test_threadlocal_argument_warning_on_generic_delegate(self):
with mock.patch('threading.local', side_effect=AssertionError()):
app = TestApp(Pecan(self.root(), use_context_locals=False))
self.assertRaises(
TypeError,
app.put,
'/generic/'
)
class TestIndexRouting(PecanTestCase):
@property
def app_(self):
class RootController(object):
@expose()
def index(self, req, resp):
assert isinstance(req, webob.BaseRequest)
assert isinstance(resp, webob.Response)
return 'Hello, World!'
return TestApp(Pecan(RootController(), use_context_locals=False))
def test_empty_root(self):
r = self.app_.get('/')
assert r.status_int == 200
assert r.body == b_('Hello, World!')
def test_index(self):
r = self.app_.get('/index')
assert r.status_int == 200
assert r.body == b_('Hello, World!')
def test_index_html(self):
r = self.app_.get('/index.html')
assert r.status_int == 200
assert r.body == b_('Hello, World!')
class TestManualResponse(PecanTestCase):
def test_manual_response(self):
class RootController(object):
@expose()
def index(self, req, resp):
resp = webob.Response(resp.environ)
resp.body = b_('Hello, World!')
return resp
app = TestApp(Pecan(RootController(), use_context_locals=False))
r = app.get('/')
assert r.body == b_('Hello, World!'), r.body
class TestDispatch(PecanTestCase):
@property
def app_(self):
class SubSubController(object):
@expose()
def index(self, req, resp):
assert isinstance(req, webob.BaseRequest)
assert isinstance(resp, webob.Response)
return '/sub/sub/'
@expose()
def deeper(self, req, resp):
assert isinstance(req, webob.BaseRequest)
assert isinstance(resp, webob.Response)
return '/sub/sub/deeper'
class SubController(object):
@expose()
def index(self, req, resp):
assert isinstance(req, webob.BaseRequest)
assert isinstance(resp, webob.Response)
return '/sub/'
@expose()
def deeper(self, req, resp):
assert isinstance(req, webob.BaseRequest)
assert isinstance(resp, webob.Response)
return '/sub/deeper'
sub = SubSubController()
class RootController(object):
@expose()
def index(self, req, resp):
assert isinstance(req, webob.BaseRequest)
assert isinstance(resp, webob.Response)
return '/'
@expose()
def deeper(self, req, resp):
assert isinstance(req, webob.BaseRequest)
assert isinstance(resp, webob.Response)
return '/deeper'
sub = SubController()
return TestApp(Pecan(RootController(), use_context_locals=False))
def test_index(self):
r = self.app_.get('/')
assert r.status_int == 200
assert r.body == b_('/')
def test_one_level(self):
r = self.app_.get('/deeper')
assert r.status_int == 200
assert r.body == b_('/deeper')
def test_one_level_with_trailing(self):
r = self.app_.get('/sub/')
assert r.status_int == 200
assert r.body == b_('/sub/')
def test_two_levels(self):
r = self.app_.get('/sub/deeper')
assert r.status_int == 200
assert r.body == b_('/sub/deeper')
def test_two_levels_with_trailing(self):
r = self.app_.get('/sub/sub/')
assert r.status_int == 200
def test_three_levels(self):
r = self.app_.get('/sub/sub/deeper')
assert r.status_int == 200
assert r.body == b_('/sub/sub/deeper')
class TestLookups(PecanTestCase):
@property
def app_(self):
class LookupController(object):
def __init__(self, someID):
self.someID = someID
@expose()
def index(self, req, resp):
return '/%s' % self.someID
@expose()
def name(self, req, resp):
return '/%s/name' % self.someID
class RootController(object):
@expose()
def index(self, req, resp):
return '/'
@expose()
def _lookup(self, someID, *remainder):
return LookupController(someID), remainder
return TestApp(Pecan(RootController(), use_context_locals=False))
def test_index(self):
r = self.app_.get('/')
assert r.status_int == 200
assert r.body == b_('/')
def test_lookup(self):
r = self.app_.get('/100/')
assert r.status_int == 200
assert r.body == b_('/100')
def test_lookup_with_method(self):
r = self.app_.get('/100/name')
assert r.status_int == 200
assert r.body == b_('/100/name')
def test_lookup_with_wrong_argspec(self):
class RootController(object):
@expose()
def _lookup(self, someID):
return 'Bad arg spec' # pragma: nocover
with warnings.catch_warnings():
warnings.simplefilter("ignore")
app = TestApp(Pecan(RootController(), use_context_locals=False))
r = app.get('/foo/bar', expect_errors=True)
assert r.status_int == 404
class TestCanonicalLookups(PecanTestCase):
@property
def app_(self):
class LookupController(object):
def __init__(self, someID):
self.someID = someID
@expose()
def index(self, req, resp):
return self.someID
class UserController(object):
@expose()
def _lookup(self, someID, *remainder):
return LookupController(someID), remainder
class RootController(object):
users = UserController()
return TestApp(Pecan(RootController(), use_context_locals=False))
def test_canonical_lookup(self):
assert self.app_.get('/users', expect_errors=404).status_int == 404
assert self.app_.get('/users/', expect_errors=404).status_int == 404
assert self.app_.get('/users/100').status_int == 302
assert self.app_.get('/users/100/').body == b_('100')
class TestControllerArguments(PecanTestCase):
@property
def app_(self):
class RootController(object):
@expose()
def index(self, req, resp, id):
return 'index: %s' % id
@expose()
def multiple(self, req, resp, one, two):
return 'multiple: %s, %s' % (one, two)
@expose()
def optional(self, req, resp, id=None):
return 'optional: %s' % str(id)
@expose()
def multiple_optional(self, req, resp, one=None, two=None,
three=None):
return 'multiple_optional: %s, %s, %s' % (one, two, three)
@expose()
def variable_args(self, req, resp, *args):
return 'variable_args: %s' % ', '.join(args)
@expose()
def variable_kwargs(self, req, resp, **kwargs):
data = [
'%s=%s' % (key, kwargs[key])
for key in sorted(kwargs.keys())
]
return 'variable_kwargs: %s' % ', '.join(data)
@expose()
def variable_all(self, req, resp, *args, **kwargs):
data = [
'%s=%s' % (key, kwargs[key])
for key in sorted(kwargs.keys())
]
return 'variable_all: %s' % ', '.join(list(args) + data)
@expose()
def eater(self, req, resp, id, dummy=None, *args, **kwargs):
data = [
'%s=%s' % (key, kwargs[key])
for key in sorted(kwargs.keys())
]
return 'eater: %s, %s, %s' % (
id,
dummy,
', '.join(list(args) + data)
)
@expose()
def _route(self, args, request):
if hasattr(self, args[0]):
return getattr(self, args[0]), args[1:]
else:
return self.index, args
return TestApp(Pecan(RootController(), use_context_locals=False))
def test_required_argument(self):
try:
r = self.app_.get('/')
assert r.status_int != 200 # pragma: nocover
except Exception as ex:
assert type(ex) == TypeError
assert ex.args[0] in (
"index() takes exactly 2 arguments (1 given)",
"index() missing 1 required positional argument: 'id'",
(
"TestControllerArguments.app_.<locals>.RootController."
"index() missing 1 required positional argument: 'id'"
),
) # this messaging changed in Python 3.3 and again in Python 3.10
def test_single_argument(self):
r = self.app_.get('/1')
assert r.status_int == 200
assert r.body == b_('index: 1')
def test_single_argument_with_encoded_url(self):
r = self.app_.get('/This%20is%20a%20test%21')
assert r.status_int == 200
assert r.body == b_('index: This is a test!')
def test_two_arguments(self):
r = self.app_.get('/1/dummy', status=404)
assert r.status_int == 404
def test_keyword_argument(self):
r = self.app_.get('/?id=2')
assert r.status_int == 200
assert r.body == b_('index: 2')
def test_keyword_argument_with_encoded_url(self):
r = self.app_.get('/?id=This%20is%20a%20test%21')
assert r.status_int == 200
assert r.body == b_('index: This is a test!')
def test_argument_and_keyword_argument(self):
r = self.app_.get('/3?id=three')
assert r.status_int == 200
assert r.body == b_('index: 3')
def test_encoded_argument_and_keyword_argument(self):
r = self.app_.get('/This%20is%20a%20test%21?id=three')
assert r.status_int == 200
assert r.body == b_('index: This is a test!')
def test_explicit_kwargs(self):
r = self.app_.post('/', {'id': '4'})
assert r.status_int == 200
assert r.body == b_('index: 4')
def test_path_with_explicit_kwargs(self):
r = self.app_.post('/4', {'id': 'four'})
assert r.status_int == 200
assert r.body == b_('index: 4')
def test_multiple_kwargs(self):
r = self.app_.get('/?id=5&dummy=dummy')
assert r.status_int == 200
assert r.body == b_('index: 5')
def test_kwargs_from_root(self):
r = self.app_.post('/', {'id': '6', 'dummy': 'dummy'})
assert r.status_int == 200
assert r.body == b_('index: 6')
# multiple args
def test_multiple_positional_arguments(self):
r = self.app_.get('/multiple/one/two')
assert r.status_int == 200
assert r.body == b_('multiple: one, two')
def test_multiple_positional_arguments_with_url_encode(self):
r = self.app_.get('/multiple/One%20/Two%21')
assert r.status_int == 200
assert r.body == b_('multiple: One , Two!')
def test_multiple_positional_arguments_with_kwargs(self):
r = self.app_.get('/multiple?one=three&two=four')
assert r.status_int == 200
assert r.body == b_('multiple: three, four')
def test_multiple_positional_arguments_with_url_encoded_kwargs(self):
r = self.app_.get('/multiple?one=Three%20&two=Four%20%21')
assert r.status_int == 200
assert r.body == b_('multiple: Three , Four !')
def test_positional_args_with_dictionary_kwargs(self):
r = self.app_.post('/multiple', {'one': 'five', 'two': 'six'})
assert r.status_int == 200
assert r.body == b_('multiple: five, six')
def test_positional_args_with_url_encoded_dictionary_kwargs(self):
r = self.app_.post('/multiple', {'one': 'Five%20', 'two': 'Six%20%21'})
assert r.status_int == 200
assert r.body == b_('multiple: Five%20, Six%20%21')
# optional arg
def test_optional_arg(self):
r = self.app_.get('/optional')
assert r.status_int == 200
assert r.body == b_('optional: None')
def test_multiple_optional(self):
r = self.app_.get('/optional/1')
assert r.status_int == 200
assert r.body == b_('optional: 1')
def test_multiple_optional_url_encoded(self):
r = self.app_.get('/optional/Some%20Number')
assert r.status_int == 200
assert r.body == b_('optional: Some Number')
def test_multiple_optional_missing(self):
r = self.app_.get('/optional/2/dummy', status=404)
assert r.status_int == 404
def test_multiple_with_kwargs(self):
r = self.app_.get('/optional?id=2')
assert r.status_int == 200
assert r.body == b_('optional: 2')
def test_multiple_with_url_encoded_kwargs(self):
r = self.app_.get('/optional?id=Some%20Number')
assert r.status_int == 200
assert r.body == b_('optional: Some Number')
def test_multiple_args_with_url_encoded_kwargs(self):
r = self.app_.get('/optional/3?id=three')
assert r.status_int == 200
assert r.body == b_('optional: 3')
def test_url_encoded_positional_args(self):
r = self.app_.get('/optional/Some%20Number?id=three')
assert r.status_int == 200
assert r.body == b_('optional: Some Number')
def test_optional_arg_with_kwargs(self):
r = self.app_.post('/optional', {'id': '4'})
assert r.status_int == 200
assert r.body == b_('optional: 4')
def test_optional_arg_with_url_encoded_kwargs(self):
r = self.app_.post('/optional', {'id': 'Some%20Number'})
assert r.status_int == 200
assert r.body == b_('optional: Some%20Number')
def test_multiple_positional_arguments_with_dictionary_kwargs(self):
r = self.app_.post('/optional/5', {'id': 'five'})
assert r.status_int == 200
assert r.body == b_('optional: 5')
def test_multiple_positional_url_encoded_arguments_with_kwargs(self):
r = self.app_.post('/optional/Some%20Number', {'id': 'five'})
assert r.status_int == 200
assert r.body == b_('optional: Some Number')
def test_optional_arg_with_multiple_kwargs(self):
r = self.app_.get('/optional?id=6&dummy=dummy')
assert r.status_int == 200
assert r.body == b_('optional: 6')
def test_optional_arg_with_multiple_url_encoded_kwargs(self):
r = self.app_.get('/optional?id=Some%20Number&dummy=dummy')
assert r.status_int == 200
assert r.body == b_('optional: Some Number')
def test_optional_arg_with_multiple_dictionary_kwargs(self):
r = self.app_.post('/optional', {'id': '7', 'dummy': 'dummy'})
assert r.status_int == 200
assert r.body == b_('optional: 7')
def test_optional_arg_with_multiple_url_encoded_dictionary_kwargs(self):
r = self.app_.post('/optional', {
'id': 'Some%20Number',
'dummy': 'dummy'
})
assert r.status_int == 200
assert r.body == b_('optional: Some%20Number')
# multiple optional args
def test_multiple_optional_positional_args(self):
r = self.app_.get('/multiple_optional')
assert r.status_int == 200
assert r.body == b_('multiple_optional: None, None, None')
def test_multiple_optional_positional_args_one_arg(self):
r = self.app_.get('/multiple_optional/1')
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, None, None')
def test_multiple_optional_positional_args_one_url_encoded_arg(self):
r = self.app_.get('/multiple_optional/One%21')
assert r.status_int == 200
assert r.body == b_('multiple_optional: One!, None, None')
def test_multiple_optional_positional_args_all_args(self):
r = self.app_.get('/multiple_optional/1/2/3')
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, 2, 3')
def test_multiple_optional_positional_args_all_url_encoded_args(self):
r = self.app_.get('/multiple_optional/One%21/Two%21/Three%21')
assert r.status_int == 200
assert r.body == b_('multiple_optional: One!, Two!, Three!')
def test_multiple_optional_positional_args_too_many_args(self):
r = self.app_.get('/multiple_optional/1/2/3/dummy', status=404)
assert r.status_int == 404
def test_multiple_optional_positional_args_with_kwargs(self):
r = self.app_.get('/multiple_optional?one=1')
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, None, None')
def test_multiple_optional_positional_args_with_url_encoded_kwargs(self):
r = self.app_.get('/multiple_optional?one=One%21')
assert r.status_int == 200
assert r.body == b_('multiple_optional: One!, None, None')
def test_multiple_optional_positional_args_with_string_kwargs(self):
r = self.app_.get('/multiple_optional/1?one=one')
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, None, None')
def test_multiple_optional_positional_args_with_encoded_str_kwargs(self):
r = self.app_.get('/multiple_optional/One%21?one=one')
assert r.status_int == 200
assert r.body == b_('multiple_optional: One!, None, None')
def test_multiple_optional_positional_args_with_dict_kwargs(self):
r = self.app_.post('/multiple_optional', {'one': '1'})
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, None, None')
def test_multiple_optional_positional_args_with_encoded_dict_kwargs(self):
r = self.app_.post('/multiple_optional', {'one': 'One%21'})
assert r.status_int == 200
assert r.body == b_('multiple_optional: One%21, None, None')
def test_multiple_optional_positional_args_and_dict_kwargs(self):
r = self.app_.post('/multiple_optional/1', {'one': 'one'})
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, None, None')
def test_multiple_optional_encoded_positional_args_and_dict_kwargs(self):
r = self.app_.post('/multiple_optional/One%21', {'one': 'one'})
assert r.status_int == 200
assert r.body == b_('multiple_optional: One!, None, None')
def test_multiple_optional_args_with_multiple_kwargs(self):
r = self.app_.get('/multiple_optional?one=1&two=2&three=3&four=4')
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, 2, 3')
def test_multiple_optional_args_with_multiple_encoded_kwargs(self):
r = self.app_.get(
'/multiple_optional?one=One%21&two=Two%21&three=Three%21&four=4'
)
assert r.status_int == 200
assert r.body == b_('multiple_optional: One!, Two!, Three!')
def test_multiple_optional_args_with_multiple_dict_kwargs(self):
r = self.app_.post(
'/multiple_optional',
{'one': '1', 'two': '2', 'three': '3', 'four': '4'}
)
assert r.status_int == 200
assert r.body == b_('multiple_optional: 1, 2, 3')
def test_multiple_optional_args_with_multiple_encoded_dict_kwargs(self):
r = self.app_.post(
'/multiple_optional',
{
'one': 'One%21',
'two': 'Two%21',
'three': 'Three%21',
'four': '4'
}
)
assert r.status_int == 200
assert r.body == b_('multiple_optional: One%21, Two%21, Three%21')
def test_multiple_optional_args_with_last_kwarg(self):
r = self.app_.get('/multiple_optional?three=3')
assert r.status_int == 200
assert r.body == b_('multiple_optional: None, None, 3')
def test_multiple_optional_args_with_last_encoded_kwarg(self):
r = self.app_.get('/multiple_optional?three=Three%21')
assert r.status_int == 200
assert r.body == b_('multiple_optional: None, None, Three!')
def test_multiple_optional_args_with_middle_arg(self):
r = self.app_.get('/multiple_optional', {'two': '2'})
assert r.status_int == 200
assert r.body == b_('multiple_optional: None, 2, None')
def test_variable_args(self):
r = self.app_.get('/variable_args')
assert r.status_int == 200
assert r.body == b_('variable_args: ')
def test_multiple_variable_args(self):
r = self.app_.get('/variable_args/1/dummy')
assert r.status_int == 200
assert r.body == b_('variable_args: 1, dummy')
def test_multiple_encoded_variable_args(self):
r = self.app_.get('/variable_args/Testing%20One%20Two/Three%21')
assert r.status_int == 200
assert r.body == b_('variable_args: Testing One Two, Three!')
def test_variable_args_with_kwargs(self):
r = self.app_.get('/variable_args?id=2&dummy=dummy')
assert r.status_int == 200
assert r.body == b_('variable_args: ')
def test_variable_args_with_dict_kwargs(self):
r = self.app_.post('/variable_args', {'id': '3', 'dummy': 'dummy'})
assert r.status_int == 200
assert r.body == b_('variable_args: ')
def test_variable_kwargs(self):
r = self.app_.get('/variable_kwargs')
assert r.status_int == 200
assert r.body == b_('variable_kwargs: ')
def test_multiple_variable_kwargs(self):
r = self.app_.get('/variable_kwargs/1/dummy', status=404)
assert r.status_int == 404
def test_multiple_variable_kwargs_with_explicit_kwargs(self):
r = self.app_.get('/variable_kwargs?id=2&dummy=dummy')
assert r.status_int == 200
assert r.body == b_('variable_kwargs: dummy=dummy, id=2')
def test_multiple_variable_kwargs_with_explicit_encoded_kwargs(self):
r = self.app_.get(
'/variable_kwargs?id=Two%21&dummy=This%20is%20a%20test'
)
assert r.status_int == 200
assert r.body == b_('variable_kwargs: dummy=This is a test, id=Two!')
def test_multiple_variable_kwargs_with_dict_kwargs(self):
r = self.app_.post('/variable_kwargs', {'id': '3', 'dummy': 'dummy'})
assert r.status_int == 200
assert r.body == b_('variable_kwargs: dummy=dummy, id=3')
def test_multiple_variable_kwargs_with_encoded_dict_kwargs(self):
r = self.app_.post(
'/variable_kwargs',
{'id': 'Three%21', 'dummy': 'This%20is%20a%20test'}
)
assert r.status_int == 200
result = 'variable_kwargs: dummy=This%20is%20a%20test, id=Three%21'
assert r.body == b_(result)
def test_variable_all(self):
r = self.app_.get('/variable_all')
assert r.status_int == 200
assert r.body == b_('variable_all: ')
def test_variable_all_with_one_extra(self):
r = self.app_.get('/variable_all/1')
assert r.status_int == 200
assert r.body == b_('variable_all: 1')
def test_variable_all_with_two_extras(self):
r = self.app_.get('/variable_all/2/dummy')
assert r.status_int == 200
assert r.body == b_('variable_all: 2, dummy')
def test_variable_mixed(self):
r = self.app_.get('/variable_all/3?month=1&day=12')
assert r.status_int == 200
assert r.body == b_('variable_all: 3, day=12, month=1')
def test_variable_mixed_explicit(self):
r = self.app_.get('/variable_all/4?id=four&month=1&day=12')
assert r.status_int == 200
assert r.body == b_('variable_all: 4, day=12, id=four, month=1')
def test_variable_post(self):
r = self.app_.post('/variable_all/5/dummy')
assert r.status_int == 200
assert r.body == b_('variable_all: 5, dummy')
def test_variable_post_with_kwargs(self):
r = self.app_.post('/variable_all/6', {'month': '1', 'day': '12'})
assert r.status_int == 200
assert r.body == b_('variable_all: 6, day=12, month=1')
def test_variable_post_mixed(self):
r = self.app_.post(
'/variable_all/7',
{'id': 'seven', 'month': '1', 'day': '12'}
)
assert r.status_int == 200
assert r.body == b_('variable_all: 7, day=12, id=seven, month=1')
def test_no_remainder(self):
try:
r = self.app_.get('/eater')
assert r.status_int != 200 # pragma: nocover
except Exception as ex:
assert type(ex) == TypeError
assert ex.args[0] in (
"eater() takes exactly 2 arguments (1 given)",
"eater() missing 1 required positional argument: 'id'",
(
"TestControllerArguments.app_.<locals>.RootController."
"eater() missing 1 required positional argument: 'id'"
),
) # this messaging changed in Python 3.3 and again in Python 3.10
def test_one_remainder(self):
r = self.app_.get('/eater/1')
assert r.status_int == 200
assert r.body == b_('eater: 1, None, ')
def test_two_remainders(self):
r = self.app_.get('/eater/2/dummy')
assert r.status_int == 200
assert r.body == b_('eater: 2, dummy, ')
def test_many_remainders(self):
r = self.app_.get('/eater/3/dummy/foo/bar')
assert r.status_int == 200
assert r.body == b_('eater: 3, dummy, foo, bar')
def test_remainder_with_kwargs(self):
r = self.app_.get('/eater/4?month=1&day=12')
assert r.status_int == 200
assert r.body == b_('eater: 4, None, day=12, month=1')
def test_remainder_with_many_kwargs(self):
r = self.app_.get('/eater/5?id=five&month=1&day=12&dummy=dummy')
assert r.status_int == 200
assert r.body == b_('eater: 5, dummy, day=12, month=1')
def test_post_remainder(self):
r = self.app_.post('/eater/6')
assert r.status_int == 200
assert r.body == b_('eater: 6, None, ')
def test_post_three_remainders(self):
r = self.app_.post('/eater/7/dummy')
assert r.status_int == 200
assert r.body == b_('eater: 7, dummy, ')
def test_post_many_remainders(self):
r = self.app_.post('/eater/8/dummy/foo/bar')
assert r.status_int == 200
assert r.body == b_('eater: 8, dummy, foo, bar')
def test_post_remainder_with_kwargs(self):
r = self.app_.post('/eater/9', {'month': '1', 'day': '12'})
assert r.status_int == 200
assert r.body == b_('eater: 9, None, day=12, month=1')
def test_post_many_remainders_with_many_kwargs(self):
r = self.app_.post(
'/eater/10',
{'id': 'ten', 'month': '1', 'day': '12', 'dummy': 'dummy'}
)
assert r.status_int == 200
assert r.body == b_('eater: 10, dummy, day=12, month=1')
class TestRestController(PecanTestCase):
@property
def app_(self):
class OthersController(object):
@expose()
def index(self, req, resp):
return 'OTHERS'
@expose()
def echo(self, req, resp, value):
return str(value)
class ThingsController(RestController):
data = ['zero', 'one', 'two', 'three']
_custom_actions = {'count': ['GET'], 'length': ['GET', 'POST']}
others = OthersController()
@expose()
def get_one(self, req, resp, id):
return self.data[int(id)]
@expose('json')
def get_all(self, req, resp):
return dict(items=self.data)
@expose()
def length(self, req, resp, id, value=None):
length = len(self.data[int(id)])
if value:
length += len(value)
return str(length)
@expose()
def post(self, req, resp, value):
self.data.append(value)
resp.status = 302
return 'CREATED'
@expose()
def edit(self, req, resp, id):
return 'EDIT %s' % self.data[int(id)]
@expose()
def put(self, req, resp, id, value):
self.data[int(id)] = value
return 'UPDATED'
@expose()
def get_delete(self, req, resp, id):
return 'DELETE %s' % self.data[int(id)]
@expose()
def delete(self, req, resp, id):
del self.data[int(id)]
return 'DELETED'
@expose()
def reset(self, req, resp):
return 'RESET'
@expose()
def post_options(self, req, resp):
return 'OPTIONS'
@expose()
def options(self, req, resp):
abort(500)
@expose()
def other(self, req, resp):
abort(500)
class RootController(object):
things = ThingsController()
# create the app
return TestApp(Pecan(RootController(), use_context_locals=False))
def test_get_all(self):
r = self.app_.get('/things')
assert r.status_int == 200
assert r.body == b_(dumps(dict(items=['zero', 'one', 'two', 'three'])))
def test_get_one(self):
for i, value in enumerate(['zero', 'one', 'two', 'three']):
r = self.app_.get('/things/%d' % i)
assert r.status_int == 200
assert r.body == b_(value)
def test_post(self):
r = self.app_.post('/things', {'value': 'four'})
assert r.status_int == 302
assert r.body == b_('CREATED')
def test_custom_action(self):
r = self.app_.get('/things/3/edit')
assert r.status_int == 200
assert r.body == b_('EDIT three')
def test_put(self):
r = self.app_.put('/things/3', {'value': 'THREE!'})
assert r.status_int == 200
assert r.body == b_('UPDATED')
def test_put_with_method_parameter_and_get(self):
r = self.app_.get('/things/3?_method=put', {'value': 'X'}, status=405)
assert r.status_int == 405
def test_put_with_method_parameter_and_post(self):
r = self.app_.post('/things/3?_method=put', {'value': 'THREE!'})
assert r.status_int == 200
assert r.body == b_('UPDATED')
def test_get_delete(self):
r = self.app_.get('/things/3/delete')
assert r.status_int == 200
assert r.body == b_('DELETE three')
def test_delete_method(self):
r = self.app_.delete('/things/3')
assert r.status_int == 200
assert r.body == b_('DELETED')
def test_delete_with_method_parameter(self):
r = self.app_.get('/things/3?_method=DELETE', status=405)
assert r.status_int == 405
def test_delete_with_method_parameter_and_post(self):
r = self.app_.post('/things/3?_method=DELETE')
assert r.status_int == 200
assert r.body == b_('DELETED')
def test_custom_method_type(self):
r = self.app_.request('/things', method='RESET')
assert r.status_int == 200
assert r.body == b_('RESET')
def test_custom_method_type_with_method_parameter(self):
r = self.app_.get('/things?_method=RESET')
assert r.status_int == 200
assert r.body == b_('RESET')
def test_options(self):
r = self.app_.request('/things', method='OPTIONS')
assert r.status_int == 200
assert r.body == b_('OPTIONS')
def test_options_with_method_parameter(self):
r = self.app_.post('/things', {'_method': 'OPTIONS'})
assert r.status_int == 200
assert r.body == b_('OPTIONS')
def test_other_custom_action(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
r = self.app_.request('/things/other', method='MISC', status=405)
assert r.status_int == 405
def test_other_custom_action_with_method_parameter(self):
r = self.app_.post('/things/other', {'_method': 'MISC'}, status=405)
assert r.status_int == 405
def test_nested_controller_with_trailing_slash(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
r = self.app_.request('/things/others/', method='MISC')
assert r.status_int == 200
assert r.body == b_('OTHERS')
def test_nested_controller_without_trailing_slash(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
r = self.app_.request('/things/others', method='MISC', status=302)
assert r.status_int == 302
def test_invalid_custom_action(self):
r = self.app_.get('/things?_method=BAD', status=405)
assert r.status_int == 405
def test_named_action(self):
# test custom "GET" request "length"
r = self.app_.get('/things/1/length')
assert r.status_int == 200
assert r.body == b_(str(len('one')))
def test_named_nested_action(self):
# test custom "GET" request through subcontroller
r = self.app_.get('/things/others/echo?value=test')
assert r.status_int == 200
assert r.body == b_('test')
def test_nested_post(self):
# test custom "POST" request through subcontroller
r = self.app_.post('/things/others/echo', {'value': 'test'})
assert r.status_int == 200
assert r.body == b_('test')
class TestHooks(PecanTestCase):
def test_basic_single_hook(self):
run_hook = []
class RootController(object):
@expose()
def index(self, req, resp):
run_hook.append('inside')
return 'Hello, World!'
class SimpleHook(PecanHook):
def on_route(self, state):
run_hook.append('on_route')
def before(self, state):
run_hook.append('before')
def after(self, state):
run_hook.append('after')
def on_error(self, state, e):
run_hook.append('error')
app = TestApp(Pecan(
RootController(),
hooks=[SimpleHook()],
use_context_locals=False
))
response = app.get('/')
assert response.status_int == 200
assert response.body == b_('Hello, World!')
assert len(run_hook) == 4
assert run_hook[0] == 'on_route'
assert run_hook[1] == 'before'
assert run_hook[2] == 'inside'
assert run_hook[3] == 'after'
def test_basic_multi_hook(self):
run_hook = []
class RootController(object):
@expose()
def index(self, req, resp):
run_hook.append('inside')
return 'Hello, World!'
class SimpleHook(PecanHook):
def __init__(self, id):
self.id = str(id)
def on_route(self, state):
run_hook.append('on_route' + self.id)
def before(self, state):
run_hook.append('before' + self.id)
def after(self, state):
run_hook.append('after' + self.id)
def on_error(self, state, e):
run_hook.append('error' + self.id)
app = TestApp(Pecan(RootController(), hooks=[
SimpleHook(1), SimpleHook(2), SimpleHook(3)
], use_context_locals=False))
response = app.get('/')
assert response.status_int == 200
assert response.body == b_('Hello, World!')
assert len(run_hook) == 10
assert run_hook[0] == 'on_route1'
assert run_hook[1] == 'on_route2'
assert run_hook[2] == 'on_route3'
assert run_hook[3] == 'before1'
assert run_hook[4] == 'before2'
assert run_hook[5] == 'before3'
assert run_hook[6] == 'inside'
assert run_hook[7] == 'after3'
assert run_hook[8] == 'after2'
assert run_hook[9] == 'after1'
def test_partial_hooks(self):
run_hook = []
class RootController(object):
@expose()
def index(self, req, resp):
run_hook.append('inside')
return 'Hello World!'
@expose()
def causeerror(self, req, resp):
return [][1]
class ErrorHook(PecanHook):
def on_error(self, state, e):
run_hook.append('error')
class OnRouteHook(PecanHook):
def on_route(self, state):
run_hook.append('on_route')
app = TestApp(Pecan(RootController(), hooks=[
ErrorHook(), OnRouteHook()
], use_context_locals=False))
response = app.get('/')
assert response.status_int == 200
assert response.body == b_('Hello World!')
assert len(run_hook) == 2
assert run_hook[0] == 'on_route'
assert run_hook[1] == 'inside'
run_hook = []
try:
response = app.get('/causeerror')
except Exception as e:
assert isinstance(e, IndexError)
assert len(run_hook) == 2
assert run_hook[0] == 'on_route'
assert run_hook[1] == 'error'
def test_on_error_response_hook(self):
run_hook = []
class RootController(object):
@expose()
def causeerror(self, req, resp):
return [][1]
class ErrorHook(PecanHook):
def on_error(self, state, e):
run_hook.append('error')
r = webob.Response()
r.text = u_('on_error')
return r
app = TestApp(Pecan(RootController(), hooks=[
ErrorHook()
], use_context_locals=False))
response = app.get('/causeerror')
assert len(run_hook) == 1
assert run_hook[0] == 'error'
assert response.text == 'on_error'
def test_prioritized_hooks(self):
run_hook = []
class RootController(object):
@expose()
def index(self, req, resp):
run_hook.append('inside')
return 'Hello, World!'
class SimpleHook(PecanHook):
def __init__(self, id, priority=None):
self.id = str(id)
if priority:
self.priority = priority
def on_route(self, state):
run_hook.append('on_route' + self.id)
def before(self, state):
run_hook.append('before' + self.id)
def after(self, state):
run_hook.append('after' + self.id)
def on_error(self, state, e):
run_hook.append('error' + self.id)
papp = Pecan(RootController(), hooks=[
SimpleHook(1, 3), SimpleHook(2, 2), SimpleHook(3, 1)
], use_context_locals=False)
app = TestApp(papp)
response = app.get('/')
assert response.status_int == 200
assert response.body == b_('Hello, World!')
assert len(run_hook) == 10
assert run_hook[0] == 'on_route3'
assert run_hook[1] == 'on_route2'
assert run_hook[2] == 'on_route1'
assert run_hook[3] == 'before3'
assert run_hook[4] == 'before2'
assert run_hook[5] == 'before1'
assert run_hook[6] == 'inside'
assert run_hook[7] == 'after1'
assert run_hook[8] == 'after2'
assert run_hook[9] == 'after3'
def test_basic_isolated_hook(self):
run_hook = []
class SimpleHook(PecanHook):
def on_route(self, state):
run_hook.append('on_route')
def before(self, state):
run_hook.append('before')
def after(self, state):
run_hook.append('after')
def on_error(self, state, e):
run_hook.append('error')
class SubSubController(object):
@expose()
def index(self, req, resp):
run_hook.append('inside_sub_sub')
return 'Deep inside here!'
class SubController(HookController):
__hooks__ = [SimpleHook()]
@expose()
def index(self, req, resp):
run_hook.append('inside_sub')
return 'Inside here!'
sub = SubSubController()
class RootController(object):
@expose()
def index(self, req, resp):
run_hook.append('inside')
return 'Hello, World!'
sub = SubController()
app = TestApp(Pecan(RootController(), use_context_locals=False))
response = app.get('/')
assert response.status_int == 200
assert response.body == b_('Hello, World!')
assert len(run_hook) == 1
assert run_hook[0] == 'inside'
run_hook = []
response = app.get('/sub/')
assert response.status_int == 200
assert response.body == b_('Inside here!')
assert len(run_hook) == 3
assert run_hook[0] == 'before'
assert run_hook[1] == 'inside_sub'
assert run_hook[2] == 'after'
run_hook = []
response = app.get('/sub/sub/')
assert response.status_int == 200
assert response.body == b_('Deep inside here!')
assert len(run_hook) == 3
assert run_hook[0] == 'before'
assert run_hook[1] == 'inside_sub_sub'
assert run_hook[2] == 'after'
def test_isolated_hook_with_global_hook(self):
run_hook = []
class SimpleHook(PecanHook):
def __init__(self, id):
self.id = str(id)
def on_route(self, state):
run_hook.append('on_route' + self.id)
def before(self, state):
run_hook.append('before' + self.id)
def after(self, state):
run_hook.append('after' + self.id)
def on_error(self, state, e):
run_hook.append('error' + self.id)
class SubController(HookController):
__hooks__ = [SimpleHook(2)]
@expose()
def index(self, req, resp):
run_hook.append('inside_sub')
return 'Inside here!'
class RootController(object):
@expose()
def index(self, req, resp):
run_hook.append('inside')
return 'Hello, World!'
sub = SubController()
app = TestApp(Pecan(
RootController(),
hooks=[SimpleHook(1)],
use_context_locals=False
))
response = app.get('/')
assert response.status_int == 200
assert response.body == b_('Hello, World!')
assert len(run_hook) == 4
assert run_hook[0] == 'on_route1'
assert run_hook[1] == 'before1'
assert run_hook[2] == 'inside'
assert run_hook[3] == 'after1'
run_hook = []
response = app.get('/sub/')
assert response.status_int == 200
assert response.body == b_('Inside here!')
assert len(run_hook) == 6
assert run_hook[0] == 'on_route1'
assert run_hook[1] == 'before2'
assert run_hook[2] == 'before1'
assert run_hook[3] == 'inside_sub'
assert run_hook[4] == 'after1'
assert run_hook[5] == 'after2'
class TestGeneric(PecanTestCase):
@property
def root(self):
class RootController(object):
def __init__(self, unique):
self.unique = unique
@expose(generic=True, template='json')
def index(self, req, resp):
assert self.__class__.__name__ == 'RootController'
assert isinstance(req, Request)
assert isinstance(resp, Response)
assert self.unique == req.headers.get('X-Unique')
return {'hello': 'world'}
@index.when(method='POST', template='json')
def index_post(self, req, resp):
assert self.__class__.__name__ == 'RootController'
assert isinstance(req, Request)
assert isinstance(resp, Response)
assert self.unique == req.headers.get('X-Unique')
return req.json
@expose(template='json')
def echo(self, req, resp):
assert self.__class__.__name__ == 'RootController'
assert isinstance(req, Request)
assert isinstance(resp, Response)
assert self.unique == req.headers.get('X-Unique')
return req.json
@expose(template='json')
def extra(self, req, resp, first, second):
assert self.__class__.__name__ == 'RootController'
assert isinstance(req, Request)
assert isinstance(resp, Response)
assert self.unique == req.headers.get('X-Unique')
return {'first': first, 'second': second}
return RootController
def test_generics_with_im_self_default(self):
uniq = str(time.time())
with mock.patch('threading.local', side_effect=AssertionError()):
app = TestApp(Pecan(self.root(uniq), use_context_locals=False))
r = app.get('/', headers={'X-Unique': uniq})
assert r.status_int == 200
json_resp = loads(r.body.decode())
assert json_resp['hello'] == 'world'
def test_generics_with_im_self_with_method(self):
uniq = str(time.time())
with mock.patch('threading.local', side_effect=AssertionError()):
app = TestApp(Pecan(self.root(uniq), use_context_locals=False))
r = app.post_json('/', {'foo': 'bar'}, headers={'X-Unique': uniq})
assert r.status_int == 200
json_resp = loads(r.body.decode())
assert json_resp['foo'] == 'bar'
def test_generics_with_im_self_with_path(self):
uniq = str(time.time())
with mock.patch('threading.local', side_effect=AssertionError()):
app = TestApp(Pecan(self.root(uniq), use_context_locals=False))
r = app.post_json('/echo/', {'foo': 'bar'},
headers={'X-Unique': uniq})
assert r.status_int == 200
json_resp = loads(r.body.decode())
assert json_resp['foo'] == 'bar'
def test_generics_with_im_self_with_extra_args(self):
uniq = str(time.time())
with mock.patch('threading.local', side_effect=AssertionError()):
app = TestApp(Pecan(self.root(uniq), use_context_locals=False))
r = app.get('/extra/123/456', headers={'X-Unique': uniq})
assert r.status_int == 200
json_resp = loads(r.body.decode())
assert json_resp['first'] == '123'
assert json_resp['second'] == '456'
|
{
"content_hash": "1ed6f442ea1dae7c89d6563f8f196371",
"timestamp": "",
"source": "github",
"line_count": 1448,
"max_line_length": 79,
"avg_line_length": 34.533149171270715,
"alnum_prop": 0.540976721862251,
"repo_name": "pecan/pecan",
"id": "3fbcd8877f0feb9c2fd5d4108b4859b17f4551fb",
"size": "50004",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pecan/tests/test_no_thread_locals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "569"
},
{
"name": "HTML",
"bytes": "4281"
},
{
"name": "Python",
"bytes": "458451"
}
],
"symlink_target": ""
}
|
from postgresapi import models, managers
from . import _base
class CreateTestCase(_base.TestCase):
def setUp(self):
super(CreateTestCase, self).setUp()
self._drop_test_db()
def tearDown(self):
super(CreateTestCase, self).tearDown()
self._drop_test_db()
def test_success(self):
with self.app.app_context():
manager = managers.SharedManager()
manager.create_instance('databasenotexist')
db = self.create_db()
with db.transaction() as cursor:
cursor.execute('SELECT name, state, plan FROM instance')
self.assertEqual(cursor.fetchall(),
[('databasenotexist', 'running', 'shared')])
def test_already_exists(self):
db = self.create_db()
manager = managers.SharedManager()
with db.transaction() as cursor:
cursor.execute(
"INSERT INTO instance (name, state, plan) VALUES "
"('databasenotexist', 'running', 'shared')")
with self.app.app_context():
self.assertRaises(managers.InstanceAlreadyExists,
manager.create_instance,
'databasenotexist')
with db.autocommit() as cursor:
cursor.execute('CREATE ROLE databaseno_group')
cursor.execute('CREATE DATABASE databasenotexist '
'OWNER databaseno_group')
with self.app.app_context():
self.assertRaises(managers.InstanceAlreadyExists,
manager.create_instance,
'databasenotexist')
|
{
"content_hash": "52f3de113a44bde4fcee1ca2a784484c",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 73,
"avg_line_length": 36.08695652173913,
"alnum_prop": 0.5662650602409639,
"repo_name": "RichardKnop/postgres-api",
"id": "ea9aa8a90513465a803f75d5290b9c41d7e6e156",
"size": "1685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_create.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48060"
},
{
"name": "Shell",
"bytes": "1011"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ShowticklabelsValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name="showticklabels",
parent_name="sunburst.marker.colorbar",
**kwargs
):
super(ShowticklabelsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
{
"content_hash": "5388b2ee4b9015440a5e625ab7302a60",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 77,
"avg_line_length": 30.823529411764707,
"alnum_prop": 0.5935114503816794,
"repo_name": "plotly/python-api",
"id": "13f7f5dff1723f212e54432b50ccbcc70fb88207",
"size": "524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/sunburst/marker/colorbar/_showticklabels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
'''
Created on 2013-12-22
@author: Wei
'''
import utils.serialize
import utils.utils
from utils.rst_lib import *
class CRFTreeFeatureWriter:
def __init__(self, verbose):
self.features = set()
self.verbose = verbose
self.cue_phrases = utils.serialize.loadData('cue_phrases')
if self.verbose:
print 'Loaded %d cue phrases for CRF labeling' % len(self.cue_phrases)
def write_organization_features(self, constituent, scope, unit, position):
'''
1. number of EDUs in unit1 or unit2.
2. number of tokens in unit1 or unit2.
3. distance of unit1 in EDUs to the beginning (or to the end) of the sentence.
4. distance of unit2 in EDUs to the beginning (or to the end) of the sentence.
'''
num_edus = constituent.get_num_edus()
if scope:
assert constituent.start_sent_id == constituent.end_sent_id
start_edu_offset = constituent.l_start - constituent.doc.sentences[constituent.start_sent_id].start_edu
end_edu_offset = constituent.doc.sentences[constituent.end_sent_id].end_edu - constituent.r_end
if start_edu_offset == 0:
self.features.add("First_EDU_Unit%d@%d" % (unit, position))
if end_edu_offset == 0:
self.features.add("Last_EDU_Unit%d@%d" % (unit,position))
subtree_height = constituent.get_subtree_height()
if subtree_height == 1:
self.features.add('Bottom_Level_Subtree_Unit%d@%d' % (unit, position))
return num_edus, subtree_height
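    # Example of the organization feature strings added above (illustrative
    # values only): 'First_EDU_Unit1@0', 'Last_EDU_Unit2@1',
    # 'Bottom_Level_Subtree_Unit1@0'.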
def write_Ngram_features(self, constituent, unit, position):
'''
N = 1, 2, 3
1. Beginning (or end) lexical N-grams in unit 1.
2. Beginning (or end) lexical N-grams in unit 2.
3. Beginning (or end) POS N-grams in unit 1.
4. Beginning (or end) POS N-grams in unit 2.
'''
for n in range(1, 4):
# if self.verbose:
# print 'stump', stump
# print breaks
# print offset
# print
pref_PoS_ngrams = constituent.get_POS_ngram(n)
self.features.add('Beginning_POS_%d-grams_Unit%d=%s@%d' % (n, unit, '-'.join(pref_PoS_ngrams), position))
pref_lexical_ngrams = constituent.get_ngram(n)
self.features.add('Beginning_Lexical_%d-grams_Unit%d=%s@%d' % (n, unit, '-'.join(pref_lexical_ngrams), position))
end_lexical_ngrams = constituent.get_ngram(-n)
            self.features.add('End_Lexical_%d-grams_Unit%d=%s@%d' % (n, unit, '-'.join(end_lexical_ngrams), position))
end_PoS_ngrams = constituent.get_POS_ngram(-n)
self.features.add('End_POS_%d-grams_Unit%d=%s@%d' % (n, unit, '-'.join(end_PoS_ngrams), position))
def write_dominance_set_features(self, L, R, position):
assert L.doc == R.doc
l_start_sent = L.start_sent_id
l_start_word = L.start_word
l_end_sent = L.end_sent_id
l_end_word = L.end_word
r_start_sent = R.start_sent_id
r_start_word = R.start_word
r_end_sent = R.end_sent_id
r_end_word = R.end_word
l_subtrees_top_tags = []
if l_start_sent == l_end_sent:
t = L.doc.sentences[l_start_sent].parse_tree
l_ancestor_pos = t.treeposition_spanning_leaves(l_start_word, l_end_word)
if l_end_word == l_start_word + 1:
l_ancestor_pos = l_ancestor_pos[ : -1]
l_ancestor_subtree = t[l_ancestor_pos]
self.features.add('Top_syntactic_tag_Unit1=%s@%d' % (l_ancestor_subtree.node, position))
if len(l_ancestor_subtree.leaves()) == l_end_word - l_start_word:
self.features.add('Valid_syntax_subtree_Unit1@%d' % position)
l_subtrees = utils.utils.get_syntactic_subtrees(t, l_start_word, l_end_word)
self.features.add('Num_Syntax_subtrees_Unit1=%d@%d' % (len(l_subtrees), position))
if len(l_subtrees) == 1:
self.features.add('Top_Syntax_tag_Unit1=%s@%d' % (l_subtrees[0].node, position))
l_subtree_top_tags = []
for (i, subtree) in enumerate(l_subtrees):
l_subtree_top_tags.append(subtree.node)
l_subtrees_top_tags.append(l_subtree_top_tags)
else:
l_ancestor_pos = ()
r_subtrees_top_tags = []
if r_start_sent == r_end_sent:
t = R.doc.sentences[r_start_sent].parse_tree
r_ancestor_pos = t.treeposition_spanning_leaves(r_start_word, r_end_word)
if r_end_word == r_start_word + 1:
r_ancestor_pos = r_ancestor_pos[ : -1]
r_ancestor_subtree = t[r_ancestor_pos]
self.features.add('Top_syntactic_tag_Unit2=%s@%d' % (r_ancestor_subtree.node, position))
if len(r_ancestor_subtree.leaves()) == r_end_word - r_start_word:
self.features.add('Valid_syntax_subtree_Unit2@%d' % position)
r_subtrees = utils.utils.get_syntactic_subtrees(t, r_start_word, r_end_word)
self.features.add('Num_Syntax_subtrees_Unit2=%d@%d' % (len(r_subtrees), position))
if len(r_subtrees) == 1:
self.features.add('Top_Syntax_tag_Unit2=%s@%d' % (r_subtrees[0].node, position))
r_subtree_top_tags = []
for (i, subtree) in enumerate(r_subtrees):
r_subtree_top_tags.append(subtree.node)
r_subtrees_top_tags.append(r_subtree_top_tags)
else:
r_ancestor_pos = ()
min_top_tags_edit_distance = 1.0
for l_top_tags in l_subtrees_top_tags:
for r_top_tags in r_subtrees_top_tags:
distance = utils.utils.compute_edit_distance(l_top_tags, r_top_tags)
distance_norm = distance * 1.0 / max(len(l_top_tags), len(r_top_tags))
min_top_tags_edit_distance = min(min_top_tags_edit_distance, distance_norm)
self.features.add('Min_Subtrees_Top_Syntactic_Tags_Distance=%.3f@%d' % (min_top_tags_edit_distance,
position))
if l_start_sent != r_end_sent:
return
t = L.doc.sentences[l_start_sent].parse_tree
common_ancestor_pos = common_ancestor(l_ancestor_pos, r_ancestor_pos)
dist_ancestor_l = len(l_ancestor_pos) - len(common_ancestor_pos)
dist_ancestor_r = len(r_ancestor_pos) - len(common_ancestor_pos)
if dist_ancestor_l:
head = filter_lexical_head(t.get_head(l_ancestor_pos))
self.features.add('Top_lexical_head_Unit1=%s@%d' % (head, position))
self.features.add('Dist_ancestor_norm_Unit1=%s@%d' % (dist_ancestor_l/float(len(common_ancestor_pos)), position))
if dist_ancestor_r:
head = filter_lexical_head(t.get_head(r_ancestor_pos))
self.features.add('Top_lexical_head_Unit2=%s@%d' % (head, position))
            self.features.add('Dist_ancestor_norm_Unit2=%s@%d' % (dist_ancestor_r/float(len(common_ancestor_pos)), position))
if common_ancestor_pos:
syntax_tree = t
head_pos = syntax_tree[common_ancestor_pos].head
if head_pos >= l_end_word:
self.features.add('Head_in_R@%d' % position)
else:
self.features.add('Head_in_L@%d' % position)
if dist_ancestor_l == 0 or dist_ancestor_r == 0:
if dist_ancestor_l == 0:# L >> R
self.features.add('L_Dominates_R@%d' % position)
dom_pos = r_ancestor_pos[:-1]
else: # R >> L
self.features.add('R_Dominates_L@%d' % position)
dom_pos = l_ancestor_pos[:-1]
head = filter_lexical_head(syntax_tree.get_head(dom_pos))
# if head and head in self.lexical_heads:
if head:
self.features.add('Dominated_lexical_head=%s@%d' % (head, position))
tag = filter_syntactic_tag(syntax_tree.get_syntactic_tag(dom_pos))
if tag :
self.features.add('Dominated_Syntactic_tag=%s@%d' % (tag, position))
def write_substructure_features(self, constituent, unit = 1, position = 0):
self.features.add("Unit%d_Subtree_Rel_Root=%s@%d" % (unit, constituent.get_subtree_rel(), position))
def write_text_structureal_features(self, constituent, unit, position):
'''
Number of sentences in unit 1 (or unit 2).
'''
start_sent = constituent.start_sent_id
end_sent = constituent.end_sent_id
if start_sent == 0:
self.features.add('First_Sentence_Unit%d@%d' % (unit, position))
if end_sent == len(constituent.doc.sentences) - 1:
self.features.add('Last_Sentence_Unit%d@%d' % (unit, position))
num_sents = end_sent - start_sent + 1
num_paragraphs = 0
for i in range(constituent.l_start, constituent.r_end):
# print constituent.doc.edus[i]
if constituent.doc.edus[i][-1] == '<P>':
num_paragraphs += 1
return num_sents, num_paragraphs
def write_cue_phrase_features(self, constituent, unit = 1, position = 0):
if not constituent.is_leaf():
edus = [constituent.doc.edus[constituent.l_start], constituent.doc.edus[constituent.r_end - 1]]
else:
edus = [constituent.doc.edus[constituent.l_start]]
candidates = []
for edu in edus:
candidates.append(' ' + ' '.join(edu).lower().replace('<s>', '').replace('<p>', '') + ' ')
for cue_phrase in self.cue_phrases:
for (i, cand_span) in enumerate(candidates):
cue_position = 'Beginning' if i == 0 else 'Ending'
pos = cand_span.find(" " + cue_phrase + " ")
if pos >= 0:
if i == 0 and pos < 3:
self.features.add('Unit%d_%s_Cue_Phrase=%s@%d' % (unit, cue_position, cue_phrase.replace(' ', '#'), position))
                    elif i == 1 and len(cand_span[pos : ].split(' ')) <= 3:
self.features.add('Unit%d_%s_Cue_Phrase=%s@%d' % (unit, cue_position, cue_phrase.replace(' ', '#'), position))
def write_features_for_constituents(self, constituents, positions, scope, labeling):
self.features.clear()
for (i, position) in enumerate(positions):
L = constituents[i]
R = constituents[i + 1]
if L and R:
# print 'c1:', constituent1.print_span(), 'c2:', constituent2.print_span()
                l_num_edus, l_subtree_height = self.write_organization_features(L, scope, 1, position)
                r_num_edus, r_subtree_height = self.write_organization_features(R, scope, 2, position)
if l_subtree_height < r_subtree_height:
self.features.add('L_Lower_Subtree_Height_Than_R@%d' % position)
elif r_subtree_height < l_subtree_height:
self.features.add('R_Lower_Subtree_Height_Than_L@%d' % position)
else:
self.features.add('L_R_Same_Subtree_Height@%d' % position)
if l_num_edus < r_num_edus:
self.features.add('L_Fewer_EDUs_Than_R@%d' % position)
elif r_num_edus < l_num_edus:
self.features.add('R_Fewer_EDUs_Than_L@%d' % position)
else:
                    self.features.add('L_R_Same_Num_EDUs@%d' % position)
self.write_Ngram_features(L, 1, position)
self.write_Ngram_features(R, 2, position)
self.write_dominance_set_features(L, R, position)
if not scope:
l_num_sents, l_num_paragraphs = self.write_text_structureal_features(L, 1, position)
r_num_sents, r_num_paragraphs = self.write_text_structureal_features(R, 2, position)
if l_num_sents < r_num_sents:
self.features.add('L_Fewer_Num_Sentences_Than_R@%d' % position)
elif r_num_sents < l_num_sents:
self.features.add('R_Fewer_Num_Sentences_Than_L@%d' % position)
else:
self.features.add('L_R_Same_Num_Sentences@%d' % position)
if l_num_paragraphs < r_num_paragraphs:
self.features.add('L_Fewer_Num_Paragraphs_Than_R@%d' % position)
elif r_num_paragraphs < l_num_paragraphs:
self.features.add('R_Fewer_Num_Paragraphs_Than_L@%d' % position)
else:
self.features.add('L_R_Same_Num_Paragraphs@%d' % position)
self.write_substructure_features(L, 1, position)
self.write_substructure_features(R, 2, position)
self.write_cue_phrase_features(L, 1, position)
self.write_cue_phrase_features(R, 2, position)
l_num_tokens = L.get_num_tokens()
r_num_tokens = R.get_num_tokens()
if l_num_tokens * 1.5 < r_num_tokens:
self.features.add('L_Fewer_Num_Tokens_Than_R@%d' % position)
elif r_num_tokens * 1.5 < l_num_tokens:
self.features.add('R_Fewer_Num_Tokens_Than_L@%d' % position)
else:
self.features.add('L_R_Same_Num_Tokens@%d' % position)
if scope:
assert L.start_sent_id == L.end_sent_id
sent_num_edus = L.doc.sentences[L.start_sent_id].end_edu - L.doc.sentences[L.start_sent_id].start_edu
if (L.get_num_edus() + R.get_num_edus()) == sent_num_edus:
self.features.add('Last_Pair@%d' % position)
else:
if (L.get_num_edus() + R.get_num_edus()) == len(L.doc.edus):
self.features.add('Last_Pair@%d' % position)
return self.features
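# Usage sketch (illustrative only -- the constituent objects and positions come from
# the surrounding RST parsing pipeline, which is not shown in this file):
#   writer = CRFTreeFeatureWriter(verbose=False)
#   feats = writer.write_features_for_constituents(constituents, positions,
#                                                  scope=True, labeling=None)
#   # 'feats' is a set of string-valued indicator features, one group per position.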
|
{
"content_hash": "52d1decffc1addefd8f67f51f3f556fa",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 134,
"avg_line_length": 44.7514450867052,
"alnum_prop": 0.5044562128648928,
"repo_name": "habernal/feng-hirst-rst-parser-acl-2014",
"id": "395fa288ce5b082b62ba2e5ea70ee65bef69d4a7",
"size": "15484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/features/tree_feature_writer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "154260"
}
],
"symlink_target": ""
}
|
from nova.notifications.objects import base
from nova.objects import base as nova_base
from nova.objects import fields
@nova_base.NovaObjectRegistry.register_notification
class KeypairPayload(base.NotificationPayloadBase):
SCHEMA = {
'user_id': ('keypair', 'user_id'),
'name': ('keypair', 'name'),
'public_key': ('keypair', 'public_key'),
'fingerprint': ('keypair', 'fingerprint'),
'type': ('keypair', 'type')
}
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'user_id': fields.StringField(nullable=True),
'name': fields.StringField(nullable=False),
'fingerprint': fields.StringField(nullable=True),
'public_key': fields.StringField(nullable=True),
'type': fields.StringField(nullable=False),
}
def __init__(self, keypair, **kwargs):
super(KeypairPayload, self).__init__(**kwargs)
self.populate_schema(keypair=keypair)
@base.notification_sample('keypair-create-start.json')
@base.notification_sample('keypair-create-end.json')
@nova_base.NovaObjectRegistry.register_notification
class KeypairNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'payload': fields.ObjectField('KeypairPayload')
}
|
{
"content_hash": "d8347c3017f9ae6228eaa42edb5a6881",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 57,
"avg_line_length": 33.35897435897436,
"alnum_prop": 0.6617986164488855,
"repo_name": "jianghuaw/nova",
"id": "44df1a0732b268e4e77426042a70fe570263db2f",
"size": "1874",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/notifications/objects/keypair.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1435"
},
{
"name": "PHP",
"bytes": "32515"
},
{
"name": "Python",
"bytes": "19932348"
},
{
"name": "Shell",
"bytes": "28290"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
}
|
import uuid
import copy
import sys
import os
import random
from collections import defaultdict
import prettytable
from output import (LargeItem,
IngredientBase,
Liquid,
return_instance,
is_ingredient_in_list)
from pie_logger import get_logger
log = get_logger()
class Recipe:
def __init__(self, pie_instance, path):
self.ingredients = {}
self.steps = {}
self.pie_class_name = type(pie_instance).__name__
self.path = path
# read_recipe
self.read_recipe()
# get title
self.get_title()
# get crust and filling
self.get_crust_filling()
self.get_ingredients_as_list("filling")
self.get_the_steps_as_list("filling")
self.get_ingredients_as_list("crust")
self.get_the_steps_as_list("crust")
def get_ingredients_as_list(self, which):
recipe_part = getattr(self, which)
ingredients = recipe_part.split("\n\n")[0]
self.ingredients[which] = ingredients.split("\n")
def get_the_steps_as_list(self, which):
recipe_part = getattr(self, which)
self.steps[which] = recipe_part.split("\n\n")[1:]
def read_recipe(self):
self.recipe_text = open(self.path).read()
def get_title(self, split_on="CRUST"):
recipe = self.recipe_text
self.title = recipe.split(split_on)[0].strip()
def get_crust_filling(self, split_on="CRUST", and_on="FILLING"):
crust_and_filling = self.recipe_text.split(split_on)[1].strip()
crust, filling = crust_and_filling.split(and_on)
self.crust = self.remmove_first_character(crust)
self.filling = self.remmove_first_character(filling)
def remmove_first_character(self, subject_string):
return subject_string[1:].strip()
def make_shopping_list(self):
shopping_list = []
for part in self.as_dict()['Parts']:
for ingredient in part['ingredients']:
instance = return_instance(ingredient)
shopping_list.append(instance)
return shopping_list
def as_dict(self):
return {"Title": self.title,
"Parts": [
{"sub-title": "filling",
"ingredients": self.ingredients.get("filling"),
"steps": self.steps.get("filling")},
{"sub-title": "crust",
"ingredients": self.ingredients.get("crust"),
"steps": self.steps.get("crust")}]}
class Pie:
has_top_crust = True
has_fried = False
bake_time = 4000 # ms
def __init__(self, name, recipe_path=""):
"""construct the Pie class """
self.name = name
self.crust = None
self.filling = None
self.recipe_path = recipe_path
self.recipe = None
self.shopping_list = []
self.unique_pie_id = str(uuid.uuid4())
def process_recipe(self):
"process_recipe() method to make shopping list/steps for the pie"
self.recipe = Recipe(self, self.recipe_path)
self.shopping_list = self.recipe.make_shopping_list()
def get_filling(self):
return self.filling
class ApplePie(Pie):
has_fruit = True
image_key = "A"
class CherryPie(Pie):
has_fruit = True
image_key = "B"
class RaseberryPie(Pie):
has_fruit = True
image_key = "C"
class FactoryConveyorBelt:
"Handles holding state for pies and adding callbacks to handle operations"
def __init__(self):
self.pies = {}
self.oven_heat_time = 10000
self.inventory = []
self.pie_orders_qty = {}
self.callbacks = defaultdict(list)
self.known_callback_methods = ('bake', 'reload', 'oven', 'restock')
def fill_pantry(self, pies, times=5):
" Given a list of 'pies', duplicate items times 'times', adds to self.inventory"
log.debug("restock")
if type(pies) != list:
pies = [pies, ]
key_item_for_inventory = self.key_item_for_inventory()
for pie in pies:
inventory = copy.copy(pie.shopping_list)
for item in inventory:
try:
key_item_for_inventory[item.item].qty *= times
except KeyError:
item.qty *= times
self.inventory.append(item)
def get_totals(self):
"shows totals a s pretty ascii-format table"
out = {}
for x in self.inventory:
if x.item in out:
out[x.item].qty += x.qty
else:
out[x.item] = copy.copy(x)
return self.pretty_display_ingredients(out.values())
@staticmethod
def truncate(input, length=25):
"just add '...' after truncating a string at 'length'"
if len(input) > length:
return input[:length] + "..."
return input
@staticmethod
def humanize(frac):
"conert a fraction to wholes and fractions when approriate"
whole = " "
part = "{}/{}".format(frac.numerator, frac.denominator)
if frac.numerator > frac.denominator:
whole = int(frac.numerator / frac.denominator)
frac -= whole
part = ""
if frac.numerator != 0:
part = "{}/{}".format(frac.numerator, frac.denominator)
return "{} {}".format(whole, part)
@staticmethod
def pretty_display_ingredients(ingredients):
"given 'ingredients' display as pretty table"
out = defaultdict(list)
for ingr in ingredients:
as_str = "{} {} of {}".format(FactoryConveyorBelt.humanize(ingr.qty),
ingr.unit,
ingr.item)
out[ingr.name].append(FactoryConveyorBelt.truncate(as_str))
row_size = max(map(len, out.values()))
for row in out.values():
row += [""] * (row_size - len(row))
table = prettytable.PrettyTable()
for header, listings in out.items():
table.add_column(header, listings)
return table.get_string()
def key_item_for_inventory(self):
"give current inventory as `dict`"
out = {}
for ingr in self.inventory:
out[ingr.item] = ingr
return out
def add_pie(self, pie):
"add a single pie to the FactoryConveyorBelt"
self.pies[pie.unique_pie_id] = pie
# calculate ingredients
inventory_by_key = self.key_item_for_inventory()
for ingrd in pie.shopping_list:
if (inventory_by_key[ingrd.item].qty - ingrd.qty) < 0:
raise Exception("out of {}".format(ingrd.item))
inventory_by_key[ingrd.item].qty -= ingrd.qty
def add_pie_order(self, pie, qty):
"add a bunch of pies to FactoryConveyorBelt"
pie.process_recipe()
self.pie_orders_qty[pie.unique_pie_id] = qty
self.add_pie(pie)
def add_callback(self, method, func):
"add a callback function for given 'method'"
if method not in self.known_callback_methods:
raise Exception("unkown callback method: {}".format(method))
self.callbacks[method].append(func)
def run_belt(self, runner=None):
"run the FactoryConveyorBelt in production"
if runner:
return runner(belt=self)
import jupy
return jupy.run_flask_socket_app(belt=self)
def run_belt_test(self):
"simulate running the FactoryConveyorBelt"
import mock_browser
mock_browser.simulate(self, count=4)
def run_belt(test=False, runner=None):
belt = FactoryConveyorBelt()
pie = ApplePie(name="Prototype Apple Pie", recipe_path="misc/ApplePie.txt")
pie.process_recipe()
belt.fill_pantry(pie, times=10)
if test:
belt.add_pie_order(pie, 3)
log.debug("totals: ")
log.debug(belt.get_totals())
def echo_callback(callback_app, message):
callback_app.logger.info("echo {}".format(message))
for method in belt.known_callback_methods:
belt.add_callback(method, echo_callback)
names = ["Bob", "Sue", "Pap", "Karen", "Brian", "Greg"]
def bake_callback(callback_app, message):
callback_app.logger.info("bake callback")
baketype = message['baketype']
if baketype == 'apple':
pie_type = ApplePie
elif baketype == 'cherry':
pie_type = CherryPie
elif baketype == 'raseberry':
pie_type = RaseberryPie
else:
raise Exception("unknown bake type {}".format(message['baketype']))
pie = pie_type(name="{}'s {} Pie".format(random.choice(names), baketype.title()),
recipe_path="misc/ApplePie.txt")
pie.process_recipe()
try:
callback_app.belt.add_pie(pie)
except Exception as e:
return dict(error=str(e))
totals = callback_app.belt.get_totals()
return dict(image_key=pie.image_key,
totals=totals,
name=pie.name,
unique_pie_id=pie.unique_pie_id)
belt.add_callback("bake", bake_callback)
def oven_callback(callback_app, message):
callback_app.logger.info("message {}".format(message))
pie = callback_app.belt.pies[message['unique_pie_id']]
total_time = message['heat_time'] + callback_app.belt.oven_heat_time
totals = callback_app.belt.get_totals()
return dict(image_key=pie.image_key,
totals=totals,
oven_msg="Oven Heating",
name=pie.name,
bake_time=pie.bake_time,
total_time=total_time,
unique_pie_id=pie.unique_pie_id)
belt.add_callback("oven", oven_callback)
def restock_callback(callback_app, message):
callback_app.logger.info("message {}".format(message))
pie2 = ApplePie(name="Prototype Apple Pie",
recipe_path="misc/ApplePie.txt")
pie2.process_recipe()
callback_app.belt.fill_pantry(pie2, times=2)
totals = callback_app.belt.get_totals()
return dict(msg="restocked",
totals=totals)
belt.add_callback("restock", restock_callback)
if test:
return belt.run_belt_test()
else:
return belt.run_belt(runner=runner)
def run_detached():
import subprocess
return subprocess.Popen([sys.executable, os.path.realpath(__file__)],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
def stop_detached(process):
import signal
os.killpg(os.getpgid(process.pid), signal.SIGTERM)
base_class = object
try:
import IPython
import IPython.core.display
base_class = IPython.core.display.HTML
except:
print("Unexpected error:", sys.exc_info()[0])
def give_me_iframe(src, width="100%", height=525):
frame_html = """<iframe id="jupy" scrolling="no" style="border:none;"
seamless="seamless"
src="{src}"
height="{height}" width="{width}">
</iframe>""".format(**locals())
return frame_html
class JupyDisplay(base_class):
"""
"""
def __init__(self, url, width="100%", height=525):
self.resource = url
self.embed_code = give_me_iframe(url, width=width, height=height)
super(JupyDisplay, self).__init__(data=self.embed_code)
def _repr_html_(self):
return self.embed_code
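# Notebook usage sketch (the URL is a placeholder for wherever the belt app is served):
#   JupyDisplay("http://localhost:5000", height=400)   # embeds the belt UI in an iframe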
jupyter = False
try:
__IPYTHON__
jupyter = True
except NameError:
pass
if __name__ == "__main__" and not jupyter:
import argparse
parser = argparse.ArgumentParser(description='Complete pie belt')
parser.add_argument(
'--test', action="store_true", help='test the belt code',)
args = parser.parse_args()
test = False
if args.test:
log.info("running in test mode")
test = True
else:
log.info("running in single server mode")
run_belt(test=test)
|
{
"content_hash": "974bef65ec519d8bff81a327899c8b0c",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 89,
"avg_line_length": 31.83812010443864,
"alnum_prop": 0.5743808430375594,
"repo_name": "brianray/chipy_phaser_flask",
"id": "91f236764614c660d4fa595cbd6a7a032d6ccf03",
"size": "12195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "complete_pie.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "15363"
},
{
"name": "Python",
"bytes": "20298"
}
],
"symlink_target": ""
}
|
import sys
from django.utils.six.moves import input
from kolibri.core.auth.models import AdHocGroup
from kolibri.core.auth.models import FacilityDataset
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.models import Membership
from kolibri.core.logger.models import AttemptLog
from kolibri.core.logger.models import ContentSessionLog
from kolibri.core.logger.models import ContentSummaryLog
from kolibri.core.logger.models import MasteryLog
from kolibri.core.logger.models import UserSessionLog
def confirm_or_exit(message):
answer = ""
while answer not in ["yes", "n", "no"]:
answer = input("{} [Type 'yes' or 'no'.] ".format(message)).lower()
if answer != "yes":
print("Canceled! Exiting without touching the database.")
sys.exit(1)
def create_adhoc_group_for_learners(classroom, learners):
adhoc_group = AdHocGroup.objects.create(name="", parent=classroom)
for learner in learners:
Membership.objects.create(user=learner, collection=adhoc_group)
return adhoc_group
def _merge_user_models(source_user, target_user):
for f in ["gender", "birth_year", "id_number"]:
source_value = getattr(source_user, f, None)
target_value = getattr(target_user, f, None)
if not target_value and source_value:
setattr(target_user, f, source_value)
target_user.save()
blocklist = set(["id", "_morango_partition"])
def merge_users(source_user, target_user):
"""
Utility to merge two users. It makes no assumptions about whether
the users are in the same facility and does raw copies of all
associated user data, rather than try to do anything clever.
"""
if source_user.id == target_user.id:
raise ValueError("Cannot merge a user with themselves")
_merge_user_models(source_user, target_user)
id_map = {
FacilityUser: {source_user.id: target_user.id},
FacilityDataset: {
source_user.dataset_id: target_user.dataset_id,
},
}
def _merge_log_data(LogModel):
log_map = {}
id_map[LogModel] = log_map
new_logs = []
related_fields = [f for f in LogModel._meta.concrete_fields if f.is_relation]
source_logs = LogModel.objects.filter(user=source_user)
target_log_ids = set(
LogModel.objects.filter(user=target_user).values_list("id", flat=True)
)
for log in source_logs:
            # Get all serializable fields
data = log.serialize()
# Remove fields that we explicitly know we don't want to copy
for f in blocklist:
if f in data:
del data[f]
# Iterate through each relation and map the old id to the new id for the foreign key
for relation in related_fields:
data[relation.attname] = id_map[relation.related_model][
data[relation.attname]
]
# If this is a randomly created source id, preserve it, so we can stop the same logs
# being copied in repeatedly. If it is not random, remove it, so we can recreate
# it on the target.
if log.calculate_source_id() is not None:
del data["_morango_source_id"]
new_log = LogModel.deserialize(data)
if not new_log._morango_source_id:
new_log.id = new_log.calculate_uuid()
else:
# Have to do this, otherwise morango will overwrite the current source id on the model
new_log.id = new_log.compute_namespaced_id(
new_log.calculate_partition(),
new_log._morango_source_id,
new_log.morango_model_name,
)
new_log._morango_partition = new_log.calculate_partition().replace(
new_log.ID_PLACEHOLDER, new_log.id
)
log_map[log.id] = new_log.id
if new_log.id not in target_log_ids:
new_logs.append(new_log)
LogModel.objects.bulk_create(new_logs, batch_size=750)
_merge_log_data(ContentSessionLog)
_merge_log_data(ContentSummaryLog)
_merge_log_data(UserSessionLog)
_merge_log_data(MasteryLog)
_merge_log_data(AttemptLog)
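# Illustrative usage (a sketch -- the user lookups below are hypothetical):
#   source = FacilityUser.objects.get(username="duplicate_account")
#   target = FacilityUser.objects.get(username="main_account")
#   merge_users(source, target)   # copies profile fields and all log data onto target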
|
{
"content_hash": "929194d3becb93d2ca69c4cccce78f5d",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 102,
"avg_line_length": 37.8859649122807,
"alnum_prop": 0.6253762445010419,
"repo_name": "indirectlylit/kolibri",
"id": "594037c533329e3f68ec137593f17039b24dd4d3",
"size": "4319",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/core/auth/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2554964"
},
{
"name": "Dockerfile",
"bytes": "4114"
},
{
"name": "Gherkin",
"bytes": "365088"
},
{
"name": "HTML",
"bytes": "24294"
},
{
"name": "JavaScript",
"bytes": "1613945"
},
{
"name": "Makefile",
"bytes": "11953"
},
{
"name": "Python",
"bytes": "2860587"
},
{
"name": "SCSS",
"bytes": "5225"
},
{
"name": "Shell",
"bytes": "5245"
},
{
"name": "Vue",
"bytes": "1604613"
}
],
"symlink_target": ""
}
|
"""
(c) RIKEN 2015. All rights reserved.
Author: Keitaro Yamashita
This software is released under the new BSD License; see LICENSE.
"""
from __future__ import print_function
from __future__ import unicode_literals
import sys
from cctbx.array_family import flex
from cctbx import miller
from yamtbx.dataproc.xds.xds_ascii import XDS_ASCII
def run(lstin):
data = []
for l in open(lstin):
xdsasc = l.strip()
xa = XDS_ASCII(xdsasc, sys.stdout, i_only=True)
ma = miller.array(miller_set=xa.as_miller_set(anomalous_flag=False),
data=xa.iobs)
data.append((xdsasc, ma))
print("index filename")
for i, d in enumerate(data):
print(i, d[0])
print("i j n.i n.j n.common cc")
for i in range(len(data)-1):
for j in range(i+1, len(data)):
di, dj = data[i][1].common_sets(data[j][1], assert_is_similar_symmetry=False)
print(i, j, data[i][1].data().size(), data[j][1].data().size(), end=' ')
if len(di.data()) == 0:
print(0, "nan")
else:
corr = flex.linear_correlation(di.data(), dj.data())
assert corr.is_well_defined()
cc = corr.coefficient()
print(len(di.data()), cc)
# run()
if __name__ == "__main__":
lst = sys.argv[1]
run(lst)
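# Usage sketch (file names are illustrative; the list file is assumed to contain one
# XDS_ASCII file path per line, as read by run() above):
#   $ cat inputs.lst
#   run1/XDS_ASCII.HKL
#   run2/XDS_ASCII.HKL
#   $ python xds_pairwise_cc.py inputs.lst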
|
{
"content_hash": "b97e40aac77c39d9da14e41d0282491a",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 89,
"avg_line_length": 30.244444444444444,
"alnum_prop": 0.5635562086700955,
"repo_name": "keitaroyam/yamtbx",
"id": "de9bdef6e5072ef12e269d7e87c127e9b9a498a4",
"size": "1361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yamtbx/dataproc/xds/command_line/xds_pairwise_cc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "12583"
},
{
"name": "JavaScript",
"bytes": "8399"
},
{
"name": "Python",
"bytes": "1735917"
},
{
"name": "Shell",
"bytes": "2591"
}
],
"symlink_target": ""
}
|
import mock
from oslo.config import cfg
from webob import exc
from neutron.common import constants
from neutron.db import api as db_api
from neutron.db import external_net_db
from neutron.db import l3_db
from neutron.db import l3_gwmode_db
from neutron.db import models_v2
from neutron.extensions import l3
from neutron.extensions import l3_ext_gw_mode
from neutron.openstack.common import uuidutils
from neutron.tests import base
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_l3_plugin
_uuid = uuidutils.generate_uuid
FAKE_GW_PORT_ID = _uuid()
FAKE_GW_PORT_MAC = 'aa:bb:cc:dd:ee:ff'
FAKE_FIP_EXT_PORT_ID = _uuid()
FAKE_FIP_EXT_PORT_MAC = '11:22:33:44:55:66'
FAKE_FIP_INT_PORT_ID = _uuid()
FAKE_FIP_INT_PORT_MAC = 'aa:aa:aa:aa:aa:aa'
FAKE_ROUTER_PORT_ID = _uuid()
FAKE_ROUTER_PORT_MAC = 'bb:bb:bb:bb:bb:bb'
class TestExtensionManager(object):
def get_resources(self):
# Simulate extension of L3 attribute map
for key in l3.RESOURCE_ATTRIBUTE_MAP.keys():
l3.RESOURCE_ATTRIBUTE_MAP[key].update(
l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {}))
return l3.L3.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
# A simple class for making a concrete class out of the mixin
# for the case of a plugin that integrates l3 routing.
class TestDbIntPlugin(test_l3_plugin.TestL3NatIntPlugin,
l3_gwmode_db.L3_NAT_db_mixin):
supported_extension_aliases = ["external-net", "router", "ext-gw-mode"]
# A simple class for making a concrete class out of the mixin
# for the case of a l3 router service plugin
class TestDbSepPlugin(test_l3_plugin.TestL3NatServicePlugin,
l3_gwmode_db.L3_NAT_db_mixin):
supported_extension_aliases = ["router", "ext-gw-mode"]
class TestL3GwModeMixin(base.BaseTestCase):
def setUp(self):
super(TestL3GwModeMixin, self).setUp()
self.target_object = TestDbIntPlugin()
# Patch the context
ctx_patcher = mock.patch('neutron.context', autospec=True)
mock_context = ctx_patcher.start()
self.addCleanup(db_api.clear_db)
self.addCleanup(ctx_patcher.stop)
self.context = mock_context.get_admin_context()
# This ensure also calls to elevated work in unit tests
self.context.elevated.return_value = self.context
self.context.session = db_api.get_session()
# Create sample data for tests
self.ext_net_id = _uuid()
self.int_net_id = _uuid()
self.int_sub_id = _uuid()
self.tenant_id = 'the_tenant'
self.network = models_v2.Network(
id=self.ext_net_id,
tenant_id=self.tenant_id,
admin_state_up=True,
status=constants.NET_STATUS_ACTIVE)
self.net_ext = external_net_db.ExternalNetwork(
network_id=self.ext_net_id)
self.context.session.add(self.network)
        # The following is to avoid complaints from sqlite on
# foreign key violations
self.context.session.flush()
self.context.session.add(self.net_ext)
self.router = l3_db.Router(
id=_uuid(),
name=None,
tenant_id=self.tenant_id,
admin_state_up=True,
status=constants.NET_STATUS_ACTIVE,
enable_snat=True,
gw_port_id=None)
self.context.session.add(self.router)
self.context.session.flush()
self.router_gw_port = models_v2.Port(
id=FAKE_GW_PORT_ID,
tenant_id=self.tenant_id,
device_id=self.router.id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_GW,
admin_state_up=True,
status=constants.PORT_STATUS_ACTIVE,
mac_address=FAKE_GW_PORT_MAC,
network_id=self.ext_net_id)
self.router.gw_port_id = self.router_gw_port.id
self.context.session.add(self.router)
self.context.session.add(self.router_gw_port)
self.context.session.flush()
self.fip_ext_port = models_v2.Port(
id=FAKE_FIP_EXT_PORT_ID,
tenant_id=self.tenant_id,
admin_state_up=True,
device_id=self.router.id,
device_owner=l3_db.DEVICE_OWNER_FLOATINGIP,
status=constants.PORT_STATUS_ACTIVE,
mac_address=FAKE_FIP_EXT_PORT_MAC,
network_id=self.ext_net_id)
self.context.session.add(self.fip_ext_port)
self.context.session.flush()
self.int_net = models_v2.Network(
id=self.int_net_id,
tenant_id=self.tenant_id,
admin_state_up=True,
status=constants.NET_STATUS_ACTIVE)
self.int_sub = models_v2.Subnet(
id=self.int_sub_id,
tenant_id=self.tenant_id,
ip_version=4,
cidr='3.3.3.0/24',
gateway_ip='3.3.3.1',
network_id=self.int_net_id)
self.router_port = models_v2.Port(
id=FAKE_ROUTER_PORT_ID,
tenant_id=self.tenant_id,
admin_state_up=True,
device_id=self.router.id,
device_owner=l3_db.DEVICE_OWNER_ROUTER_INTF,
status=constants.PORT_STATUS_ACTIVE,
mac_address=FAKE_ROUTER_PORT_MAC,
network_id=self.int_net_id)
self.router_port_ip_info = models_v2.IPAllocation(
port_id=self.router_port.id,
network_id=self.int_net.id,
subnet_id=self.int_sub_id,
ip_address='3.3.3.1')
self.context.session.add(self.int_net)
self.context.session.add(self.int_sub)
self.context.session.add(self.router_port)
self.context.session.add(self.router_port_ip_info)
self.context.session.flush()
self.fip_int_port = models_v2.Port(
id=FAKE_FIP_INT_PORT_ID,
tenant_id=self.tenant_id,
admin_state_up=True,
device_id='something',
device_owner='compute:nova',
status=constants.PORT_STATUS_ACTIVE,
mac_address=FAKE_FIP_INT_PORT_MAC,
network_id=self.int_net_id)
self.fip_int_ip_info = models_v2.IPAllocation(
port_id=self.fip_int_port.id,
network_id=self.int_net.id,
subnet_id=self.int_sub_id,
ip_address='3.3.3.3')
self.fip = l3_db.FloatingIP(
id=_uuid(),
floating_ip_address='1.1.1.2',
floating_network_id=self.ext_net_id,
floating_port_id=FAKE_FIP_EXT_PORT_ID,
fixed_port_id=None,
fixed_ip_address=None,
router_id=None)
self.context.session.add(self.fip_int_port)
self.context.session.add(self.fip_int_ip_info)
self.context.session.add(self.fip)
self.context.session.flush()
self.fip_request = {'port_id': FAKE_FIP_INT_PORT_ID,
'tenant_id': self.tenant_id}
def _reset_ext_gw(self):
# Reset external gateway
self.router.gw_port_id = None
self.context.session.add(self.router)
self.context.session.flush()
def _test_update_router_gw(self, gw_info, expected_enable_snat):
self.target_object._update_router_gw_info(
self.context, self.router.id, gw_info)
router = self.target_object._get_router(
self.context, self.router.id)
try:
self.assertEqual(FAKE_GW_PORT_ID,
router.gw_port.id)
self.assertEqual(FAKE_GW_PORT_MAC,
router.gw_port.mac_address)
except AttributeError:
self.assertIsNone(router.gw_port)
self.assertEqual(expected_enable_snat, router.enable_snat)
def test_update_router_gw_with_gw_info_none(self):
self._test_update_router_gw(None, True)
def test_update_router_gw_with_network_only(self):
info = {'network_id': self.ext_net_id}
self._test_update_router_gw(info, True)
def test_update_router_gw_with_snat_disabled(self):
info = {'network_id': self.ext_net_id,
'enable_snat': False}
self._test_update_router_gw(info, False)
def test_make_router_dict_no_ext_gw(self):
self._reset_ext_gw()
router_dict = self.target_object._make_router_dict(self.router)
self.assertIsNone(router_dict[l3.EXTERNAL_GW_INFO])
def test_make_router_dict_with_ext_gw(self):
router_dict = self.target_object._make_router_dict(self.router)
self.assertEqual({'network_id': self.ext_net_id,
'enable_snat': True},
router_dict[l3.EXTERNAL_GW_INFO])
def test_make_router_dict_with_ext_gw_snat_disabled(self):
self.router.enable_snat = False
router_dict = self.target_object._make_router_dict(self.router)
self.assertEqual({'network_id': self.ext_net_id,
'enable_snat': False},
router_dict[l3.EXTERNAL_GW_INFO])
def test_build_routers_list_no_ext_gw(self):
self._reset_ext_gw()
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list([router_dict], [])
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNone(router.get('gw_port'))
self.assertIsNone(router.get('enable_snat'))
def test_build_routers_list_with_ext_gw(self):
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(
[router_dict], [self.router.gw_port])
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNotNone(router.get('gw_port'))
self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
self.assertTrue(router.get('enable_snat'))
def test_build_routers_list_with_ext_gw_snat_disabled(self):
self.router.enable_snat = False
router_dict = self.target_object._make_router_dict(self.router)
routers = self.target_object._build_routers_list(
[router_dict], [self.router.gw_port])
self.assertEqual(1, len(routers))
router = routers[0]
self.assertIsNotNone(router.get('gw_port'))
self.assertEqual(FAKE_GW_PORT_ID, router['gw_port']['id'])
self.assertFalse(router.get('enable_snat'))
class ExtGwModeIntTestCase(test_db_plugin.NeutronDbPluginV2TestCase,
test_l3_plugin.L3NatTestCaseMixin):
def setUp(self, plugin=None, svc_plugins=None, ext_mgr=None):
# Store l3 resource attribute map as it will be updated
self._l3_attribute_map_bk = {}
for item in l3.RESOURCE_ATTRIBUTE_MAP:
self._l3_attribute_map_bk[item] = (
l3.RESOURCE_ATTRIBUTE_MAP[item].copy())
plugin = plugin or (
'neutron.tests.unit.test_extension_ext_gw_mode.TestDbIntPlugin')
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
ext_mgr = ext_mgr or TestExtensionManager()
super(ExtGwModeIntTestCase, self).setUp(plugin=plugin,
ext_mgr=ext_mgr,
service_plugins=svc_plugins)
self.addCleanup(self.restore_l3_attribute_map)
def restore_l3_attribute_map(self):
l3.RESOURCE_ATTRIBUTE_MAP = self._l3_attribute_map_bk
def tearDown(self):
super(ExtGwModeIntTestCase, self).tearDown()
def _set_router_external_gateway(self, router_id, network_id,
snat_enabled=None,
expected_code=exc.HTTPOk.code,
neutron_context=None):
ext_gw_info = {'network_id': network_id}
# Need to set enable_snat also if snat_enabled == False
if snat_enabled is not None:
ext_gw_info['enable_snat'] = snat_enabled
return self._update('routers', router_id,
{'router': {'external_gateway_info':
ext_gw_info}},
expected_code=expected_code,
neutron_context=neutron_context)
def test_router_create_show_no_ext_gwinfo(self):
name = 'router1'
tenant_id = _uuid()
expected_value = [('name', name), ('tenant_id', tenant_id),
('admin_state_up', True), ('status', 'ACTIVE'),
('external_gateway_info', None)]
with self.router(name=name, admin_state_up=True,
tenant_id=tenant_id) as router:
res = self._show('routers', router['router']['id'])
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
def _test_router_create_show_ext_gwinfo(self, snat_input_value,
snat_expected_value):
name = 'router1'
tenant_id = _uuid()
with self.subnet() as s:
ext_net_id = s['subnet']['network_id']
self._set_net_external(ext_net_id)
input_value = {'network_id': ext_net_id}
if snat_input_value in (True, False):
input_value['enable_snat'] = snat_input_value
expected_value = [('name', name), ('tenant_id', tenant_id),
('admin_state_up', True), ('status', 'ACTIVE'),
('external_gateway_info',
{'network_id': ext_net_id,
'enable_snat': snat_expected_value})]
with self.router(
name=name, admin_state_up=True, tenant_id=tenant_id,
external_gateway_info=input_value) as router:
res = self._show('routers', router['router']['id'])
for k, v in expected_value:
self.assertEqual(res['router'][k], v)
def test_router_create_show_ext_gwinfo_default(self):
self._test_router_create_show_ext_gwinfo(None, True)
def test_router_create_show_ext_gwinfo_with_snat_enabled(self):
self._test_router_create_show_ext_gwinfo(True, True)
def test_router_create_show_ext_gwinfo_with_snat_disabled(self):
self._test_router_create_show_ext_gwinfo(False, False)
def _test_router_update_ext_gwinfo(self, snat_input_value,
snat_expected_value=False,
expected_http_code=exc.HTTPOk.code):
with self.router() as r:
with self.subnet() as s:
try:
ext_net_id = s['subnet']['network_id']
self._set_net_external(ext_net_id)
self._set_router_external_gateway(
r['router']['id'], ext_net_id,
snat_enabled=snat_input_value,
expected_code=expected_http_code)
if expected_http_code != exc.HTTPOk.code:
return
body = self._show('routers', r['router']['id'])
res_gw_info = body['router']['external_gateway_info']
self.assertEqual(res_gw_info['network_id'], ext_net_id)
self.assertEqual(res_gw_info['enable_snat'],
snat_expected_value)
finally:
self._remove_external_gateway_from_router(
r['router']['id'], ext_net_id)
def test_router_update_ext_gwinfo_default(self):
self._test_router_update_ext_gwinfo(None, True)
def test_router_update_ext_gwinfo_with_snat_enabled(self):
self._test_router_update_ext_gwinfo(True, True)
def test_router_update_ext_gwinfo_with_snat_disabled(self):
self._test_router_update_ext_gwinfo(False, False)
def test_router_update_ext_gwinfo_with_invalid_snat_setting(self):
self._test_router_update_ext_gwinfo(
'xxx', None, expected_http_code=exc.HTTPBadRequest.code)
class ExtGwModeSepTestCase(ExtGwModeIntTestCase):
def setUp(self, plugin=None):
# Store l3 resource attribute map as it will be updated
self._l3_attribute_map_bk = {}
for item in l3.RESOURCE_ATTRIBUTE_MAP:
self._l3_attribute_map_bk[item] = (
l3.RESOURCE_ATTRIBUTE_MAP[item].copy())
plugin = plugin or (
'neutron.tests.unit.test_l3_plugin.TestNoL3NatPlugin')
# the L3 service plugin
l3_plugin = ('neutron.tests.unit.test_extension_ext_gw_mode.'
'TestDbSepPlugin')
svc_plugins = {'l3_plugin_name': l3_plugin}
# for these tests we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
super(ExtGwModeSepTestCase, self).setUp(plugin=plugin,
svc_plugins=svc_plugins)
self.addCleanup(self.restore_l3_attribute_map)
|
{
"content_hash": "81abdb8521115389507e51a1f80b4979",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 77,
"avg_line_length": 42.57920792079208,
"alnum_prop": 0.5835367980467387,
"repo_name": "Comcast/neutron",
"id": "ac2fd5171e0d2cec743bbcc46fe93018602bf5e0",
"size": "17930",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/test_extension_ext_gw_mode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "6906340"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
import logging
try:
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
except ImportError:
# No installation required if not using this function
pass
from nlpaug.model.lang_models import LanguageModels
import nlpaug.util.text.tokenizer as text_tokenizer
class Bart(LanguageModels):
# https://arxiv.org/pdf/1910.13461.pdf
UNKNOWN_TOKEN = '<unk>'
def __init__(self, model_path='facebook/bart-large-cnn', min_length=10, max_length=20, num_beam=3, no_repeat_ngram_size=3,
device='cuda', silence=True):
super().__init__(device, temperature=None, top_k=None, top_p=None, silence=True)
try:
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
except ModuleNotFoundError:
raise ModuleNotFoundError('Missed transformers library. Install transfomers by `pip install transformers`')
self.model_path = model_path
self.min_length = min_length
self.max_length = max_length
self.num_beam = num_beam
self.no_repeat_ngram_size = no_repeat_ngram_size
self.tokenizer = AutoTokenizer.from_pretrained(model_path)
if silence:
            # Transformers throws a warning regarding weight initialization. It is expected
orig_log_level = logging.getLogger('transformers.' + 'modeling_utils').getEffectiveLevel()
logging.getLogger('transformers.' + 'modeling_utils').setLevel(logging.ERROR)
self.model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
logging.getLogger('transformers.' + 'modeling_utils').setLevel(orig_log_level)
else:
self.model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
self.model.to(self.device)
self.model.eval()
self.return_tensor = 'pt' # PyTorch
self.early_stopping = True
self.skip_special_token = True
self.default_max_length_ratio = 0.5
def get_model(self):
return self.model
def get_tokenizer(self):
return self.tokenizer
def get_subword_prefix(self):
return self.SUBWORD_PREFIX
def get_mask_token(self):
return self.MASK_TOKEN
def predict(self, texts, n=1):
# Convert to feature
inputs = self.tokenizer(texts, padding='longest', return_tensors=self.return_tensor)
token_inputs = inputs['input_ids'].to(self.device)
mask_inputs = inputs['attention_mask'].to(self.device)
# Prediction
min_length = min([len(text) for text in texts])
min_length = self.get_min_length(min_length)
max_length = max([len(text) for text in texts])
max_length = self.get_max_length(max_length)
results = []
with torch.no_grad():
outputs = self.model.generate(input_ids=token_inputs, attention_mask=mask_inputs,
min_length=min_length, max_length=max_length, num_beams=self.num_beam,
no_repeat_ngram_size=self.no_repeat_ngram_size)
for target_token_ids in outputs:
tokens = self.tokenizer.decode(target_token_ids, skip_special_tokens=self.skip_special_token)
# Return full sentence only.
for i in range(len(tokens)-1, -1, -1):
if tokens[i] in text_tokenizer.SENTENCE_SEPARATOR:
results.append(tokens[:i+1])
break
return results
def get_min_length(self, min_length):
return int(min_length * self.min_length) if self.min_length < 1 else self.min_length
def get_max_length(self, max_length):
if self.max_length < 1:
return int(max_length * self.max_length)
else:
if max_length >= self.max_length:
return int(max_length * self.default_max_length_ratio)
else:
return self.max_length
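# Minimal usage sketch (assumes the pretrained weights can be downloaded; the model
# path below is the class default and 'cpu' is illustrative):
#   bart = Bart(model_path='facebook/bart-large-cnn', device='cpu')
#   summaries = bart.predict(["A long paragraph of text to be shortened ..."])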
|
{
"content_hash": "bc7bc43fac067a0d4e4593778595a245",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 127,
"avg_line_length": 38.03921568627451,
"alnum_prop": 0.6376288659793814,
"repo_name": "makcedward/nlpaug",
"id": "05d2e0e9ccc7ae082790ca56b9a93c33321c373a",
"size": "3880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nlpaug/model/lang_models/bart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "777279"
},
{
"name": "Python",
"bytes": "512156"
},
{
"name": "Shell",
"bytes": "2004"
}
],
"symlink_target": ""
}
|
import commands
def test(data_path):
# parameters
folder_data = ['dmri/']
file_data = ['bvecs.txt']
# define command
cmd = 'sct_dmri_transpose_bvecs -i ' + data_path + folder_data[0] + file_data[0]
# return
return commands.getstatusoutput(cmd)
if __name__ == "__main__":
# call main function
test()
|
{
"content_hash": "d8c0b9931510fdb14ec6af0215161701",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 84,
"avg_line_length": 17.94736842105263,
"alnum_prop": 0.5953079178885631,
"repo_name": "3324fr/spinalcordtoolbox",
"id": "e7e66a7d4e4689b53f9081ca3e0fff5b2f80c62a",
"size": "866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/test_sct_dmri_transpose_bvecs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5961"
},
{
"name": "C++",
"bytes": "1025992"
},
{
"name": "CMake",
"bytes": "18919"
},
{
"name": "CSS",
"bytes": "1384"
},
{
"name": "Groff",
"bytes": "3141"
},
{
"name": "HTML",
"bytes": "5315"
},
{
"name": "JavaScript",
"bytes": "2505"
},
{
"name": "KiCad",
"bytes": "5522"
},
{
"name": "Matlab",
"bytes": "275100"
},
{
"name": "Python",
"bytes": "4808677"
},
{
"name": "Shell",
"bytes": "193192"
}
],
"symlink_target": ""
}
|
import json
import re
import click
import jsonschema
import utils
@click.command()
@click.argument("schema", type=click.File("r"), required=True)
@click.argument("jsonfiles", type=click.Path(exists=True), required=True)
def validate_path(schema, jsonfiles):
schema = json.loads(schema.read())
for path in utils.get_files(jsonfiles):
path_components = utils.get_path_parts(path)
regex = schema[path_components[0]]
if not re.compile(regex).match(path):
raise AssertionError('Path "%s" does not match spec "%s"' % (path, regex))
if __name__ == "__main__":
validate_path()
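# Illustrative schema file (hypothetical contents -- the real spec ships with the repo):
#   {"sources": "^sources/[a-z-]+/[a-zA-Z0-9_-]+\\.json$"}
# The first path component selects the regex that the full path must match.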
|
{
"content_hash": "a39bb6e340ffa3ca2a09f4561f337bee",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 86,
"avg_line_length": 25,
"alnum_prop": 0.6672,
"repo_name": "OpenBounds/Processing",
"id": "b8b5df26064903cb6c7020e2781daea3bd2791bb",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "validate_paths.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "50689"
}
],
"symlink_target": ""
}
|
"""
Most of the jokes are from https://viccfaktor.hu/cimke/programozo-viccek/, and https://gremmedia.hu/programozo-viccek
//
A viccek nagy része a https://viccfaktor.hu/cimke/programozo-viccek/ weboldalról és a https://gremmedia.hu/programozo-viccek weboldalról származik
"""
neutral = [
"- Miért keveri a programozó a Halloween-t a Karácsonnyal? - ??? - Mert az egyik oct 31 a másik meg dec 25.",
"Két programozó beszélget: - Milyen a kedved? - Egész változó... - Integer???",
"- Mire táncolnak a programozók a buliban? - Algoritmusra.",
"10-féle ember létezik. Az egyik ismeri a bináris számrendszert, a másik nem.",
"- Hogyan próbálnak meggazdagodni az objektumorientált programozók? - ??? - Öröklődés által.",
"- Mi a teendő tűz esetén? - ??? - 1. `git add .` 2. `git commit` 3. `git push` 4. hagyjuk el az épületet",
"- Mit csinál a backend fejlesztő, amikor sikeresen megváltoztat valamit CSS-ben? - ??? - Beírja az önéletrajzába, hogy full stack fejlesztő",
"- Hogy hívsz nyolc hobbitot? - ??? - Egy hobbyte."
]
chuck = [
"- Miért Chuck Norris a világ legjobb programozója? - ??? - Mert olyan kódot ír, ami saját magát képes optimalizálni."
]
jokes_hu = {
'neutral': neutral,
'chuck': chuck,
'all': neutral + chuck,
}
|
{
"content_hash": "c4d8080a497c30571b5ee261dbe2ddd4",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 146,
"avg_line_length": 49.07692307692308,
"alnum_prop": 0.6841692789968652,
"repo_name": "pyjokes/pyjokes",
"id": "ce9de8a063551e97f1ec3b2c37a587a56420937c",
"size": "1355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyjokes/jokes_hu.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1928"
},
{
"name": "Python",
"bytes": "56293"
}
],
"symlink_target": ""
}
|
import StringIO
import json
import logging
import random
import urllib
import urllib2
import multipart
# standard modules for Google App Engine
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
import webapp2
# The next two lines are needed to import third-party libraries from the libs folder
#import sys
#sys.path.insert(0, 'libs')
# Importing third-party modules
# For example:
# import vkontakte
# For convenience, all command replies can be kept in a dictionary
messages = {
'start_message': ('Мой список команд:\n'
+'/about - информация о боте\n'
+'/stop - остановить бот\n'),
'stop_message':('Пока!\n'
+'/start - запустить бот'),
'about_message': ("Информация о боте\n"
+"Автор: \n"
+"Версия: \n"),
}
TOKEN = 'INSERT YOUR BOT TOKEN HERE'
BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'
# ================================
class EnableStatus(ndb.Model):
# key name: str(chat_id)
enabled = ndb.BooleanProperty(indexed=False, default=False)
# ================================
def setEnabled(chat_id, yes):
es = EnableStatus.get_or_insert(str(chat_id))
es.enabled = yes
es.put()
def getEnabled(chat_id):
es = EnableStatus.get_by_id(str(chat_id))
if es:
return es.enabled
return False
# ================================
class MeHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))
class GetUpdatesHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))
class SetWebhookHandler(webapp2.RequestHandler):
def get(self):
urlfetch.set_default_fetch_deadline(60)
url = self.request.get('url')
if url:
self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))
class WebhookHandler(webapp2.RequestHandler):
def post(self):
urlfetch.set_default_fetch_deadline(60)
body = json.loads(self.request.body)
logging.info('request body:')
logging.info(body)
self.response.write(json.dumps(body))
update_id = body['update_id']
message = body['message']
message_id = message.get('message_id')
date = message.get('date')
text = message.get('text')
fr = message.get('from')
chat = message['chat']
chat_id = chat['id']
if not text:
logging.info('no text')
return
def reply(msg=None, img=None):
if msg:
resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode({
'chat_id': str(chat_id),
'text': msg.encode('utf-8'),
'disable_web_page_preview': 'true',
'reply_to_message_id': str(message_id),
})).read()
elif img:
resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [
('chat_id', str(chat_id)),
('reply_to_message_id', str(message_id)),
], [
('photo', 'image.jpg', img),
])
else:
logging.error('no msg or img specified')
resp = None
logging.info('send response:')
logging.info(resp)
        ############## YOU CAN START CUSTOMIZING YOUR BOT FROM HERE ##############
if text.startswith('/'):
            # List of bot commands starting with /
if text == '/start':
reply(messages['start_message'].decode('utf-8'))
setEnabled(chat_id, True)
elif text == '/stop':
mes = 'Пока!\n/start - запустить бот'
reply(mes.decode('utf-8'))
setEnabled(chat_id, False)
elif text == '/about':
reply(messages['about_message'].decode('utf-8'))
else:
mes = 'Не уверен, что понимаю, о чем ты...'
reply(mes.decode('utf-8'))
        # Other commands that do not start with /
elif 'who are you' in text:
reply('telegram bot, created by Alexander Osipenko')
elif 'what time' in text:
reply('look at the top-right corner of your screen!')
################################################################################
else:
if getEnabled(chat_id):
try:
resp1 = json.load(urllib2.urlopen('http://www.simsimi.com/requestChat?lc=en&ft=1.0&req=' + urllib.quote_plus(text.encode('utf-8'))))
back = resp1.get('res')
except urllib2.HTTPError, err:
logging.error(err)
back = str(err)
if not back:
reply('okay...')
elif 'I HAVE NO RESPONSE' in back:
reply('you said something with no meaning')
else:
reply(back)
else:
logging.info('not enabled for chat_id {}'.format(chat_id))
app = webapp2.WSGIApplication([
('/me', MeHandler),
('/updates', GetUpdatesHandler),
('/set_webhook', SetWebhookHandler),
('/webhook', WebhookHandler),
], debug=True)
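# Deployment sketch (illustrative; YOUR-APP-ID is a placeholder):
#   1. Deploy to App Engine, then open
#      https://YOUR-APP-ID.appspot.com/set_webhook?url=https://YOUR-APP-ID.appspot.com/webhook
#   2. Telegram will start POSTing updates to the /webhook handler above.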
|
{
"content_hash": "278179d254e7f10909a81ded209b2576",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 152,
"avg_line_length": 30.50537634408602,
"alnum_prop": 0.5188579485371871,
"repo_name": "subpath/TelegramBot",
"id": "a4450b423dd0b4a9b7eff2f3efc67df36cfedf37",
"size": "6101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14461"
}
],
"symlink_target": ""
}
|
from django.db import migrations
def add_choose_permission_to_admin_groups(apps, _schema_editor):
ContentType = apps.get_model('contenttypes.ContentType')
Permission = apps.get_model('auth.Permission')
Group = apps.get_model('auth.Group')
# Get document content type
document_content_type, _created = ContentType.objects.get_or_create(
model='document',
app_label='wagtaildocs'
)
# Create the Choose permission (if it doesn't already exist)
choose_document_permission, _created = Permission.objects.get_or_create(
content_type=document_content_type,
codename='choose_document',
defaults={'name': 'Can choose document'}
)
# Assign it to all groups which have "Access the Wagtail admin" permission.
# This emulates the previous behavior, where everyone who would access the admin
# could choose any document in any Collection, because choosing wasn't permissioned.
for group in Group.objects.filter(permissions__codename='access_admin'):
group.permissions.add(choose_document_permission)
def remove_choose_permission(apps, _schema_editor):
"""Reverse the above additions of permissions."""
ContentType = apps.get_model('contenttypes.ContentType')
Permission = apps.get_model('auth.Permission')
document_content_type = ContentType.objects.get(
model='document',
app_label='wagtaildocs',
)
# This cascades to Group
Permission.objects.filter(
content_type=document_content_type,
codename='choose_document'
).delete()
def get_choose_permission(apps):
Permission = apps.get_model('auth.Permission')
ContentType = apps.get_model('contenttypes.ContentType')
document_content_type, _created = ContentType.objects.get_or_create(
model='document',
app_label='wagtaildocs',
)
return Permission.objects.filter(
content_type=document_content_type,
codename__in=['choose_document']
).first()
def copy_choose_permission_to_collections(apps, _schema_editor):
Collection = apps.get_model('wagtailcore.Collection')
Group = apps.get_model('auth.Group')
GroupCollectionPermission = apps.get_model('wagtailcore.GroupCollectionPermission')
root_collection = Collection.objects.get(depth=1)
permission = get_choose_permission(apps)
if permission:
for group in Group.objects.filter(permissions=permission):
GroupCollectionPermission.objects.create(
group=group,
collection=root_collection,
permission=permission
)
def remove_choose_permission_from_collections(apps, _schema_editor):
GroupCollectionPermission = apps.get_model('wagtailcore.GroupCollectionPermission')
choose_permission = get_choose_permission(apps)
if choose_permission:
GroupCollectionPermission.objects.filter(permission=choose_permission).delete()
class Migration(migrations.Migration):
dependencies = [
('wagtaildocs', '0010_document_file_hash'),
]
operations = [
migrations.AlterModelOptions(
name='document',
options={'permissions': [('choose_document', 'Can choose document')], 'verbose_name': 'document', 'verbose_name_plural': 'documents'},
),
migrations.RunPython(add_choose_permission_to_admin_groups, remove_choose_permission),
migrations.RunPython(copy_choose_permission_to_collections, remove_choose_permission_from_collections),
]
|
{
"content_hash": "7e2d44315721f687e838244726fcee54",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 146,
"avg_line_length": 37.1578947368421,
"alnum_prop": 0.6940509915014165,
"repo_name": "torchbox/wagtail",
"id": "1c902a6d4c2108210c2dffbae1dfefe606d6d352",
"size": "3579",
"binary": false,
"copies": "5",
"ref": "refs/heads/stable/2.15.x",
"path": "wagtail/documents/migrations/0011_add_choose_permissions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "178240"
},
{
"name": "HTML",
"bytes": "307456"
},
{
"name": "JavaScript",
"bytes": "123792"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "2786743"
},
{
"name": "Shell",
"bytes": "7997"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
import re
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
F, Aggregate, Avg, Count, DecimalField, DurationField, FloatField, Func,
IntegerField, Max, Min, Sum, Value,
)
from django.test import TestCase, ignore_warnings
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import six, timezone
from django.utils.deprecation import RemovedInDjango110Warning
from .models import Author, Book, Publisher, Store
class AggregateTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1))
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2))
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=self.b1.pk)
self.assertEqual(
b.name,
'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_defer(self):
qs = Book.objects.annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(1, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
def test_annotate_defer_select_related(self):
qs = Book.objects.select_related('contact').annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(1, "159059725", 447, "Adrian Holovaty",
"The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name)
)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 51.5),
('Practical Django Projects', 29.0),
('Python Web Development with Django', Approximate(30.3, places=1)),
('Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 4.5),
('Brad Dayley', 3.0),
('Jacob Kaplan-Moss', 4.5),
('James Bennett', 4.0),
('Paul Bissex', 4.0),
('Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 1),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 1),
('Peter Norvig', 2),
('Stuart Russell', 1),
('Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 7),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
('Practical Django Projects', 3),
('Python Web Development with Django', 7),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
('Apress', Decimal("59.69")),
("Jonno's House of Books", None),
('Morgan Kaufmann', Decimal("75.00")),
('Prentice Hall', Decimal("112.49")),
('Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg('authors__age')).values('pk', 'isbn', 'mean_age')
self.assertEqual(
list(books), [
{
"pk": 1,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.values("rating").annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")).order_by("rating")
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 32.0),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 29.5),
('James Bennett', 34.0),
('Jeffrey Forcier', 27.0),
('Paul Bissex', 31.0),
('Peter Norvig', 46.0),
('Stuart Russell', 57.0),
('Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_avg_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Avg('duration', output_field=DurationField())),
{'duration__avg': datetime.timedelta(days=1, hours=12)}
)
def test_sum_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Sum('duration', output_field=DurationField())),
{'duration__sum': datetime.timedelta(days=3)}
)
def test_sum_distinct_aggregate(self):
"""
        Sum on a distinct() QuerySet should aggregate only the distinct items.
"""
authors = Author.objects.filter(book__in=[5, 6])
self.assertEqual(authors.count(), 3)
distinct_authors = authors.distinct()
self.assertEqual(distinct_authors.count(), 2)
# Selected author ages are 57 and 46
age_sum = distinct_authors.aggregate(Sum('age'))
self.assertEqual(age_sum['age__sum'], 103)
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1, book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = Author.objects.annotate(num_friends=Count("friends__id", distinct=True)).filter(num_friends=0).order_by("pk")
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1).aggregate(Avg("rating"))
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(
earliest_book=Min("book__pubdate"),
).exclude(earliest_book=None).order_by("earliest_book").values(
'earliest_book',
'num_awards',
'id',
'name',
)
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': 4,
'name': 'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': 3,
'name': 'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': 1,
'name': 'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': 2,
'name': 'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("pk", "isbn", "mean_age")
self.assertEqual(
list(books), [
(1, "159059725", 34.5),
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(
list(books), [
('159059725',)
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(
list(books), [
(34.5,)
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age", flat=True)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
def test_dates_with_aggregation(self):
"""
Test that .dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
self.assertQuerysetEqual(
dates, [
"datetime.date(1991, 1, 1)",
"datetime.date(1995, 1, 1)",
"datetime.date(2007, 1, 1)",
"datetime.date(2008, 1, 1)"
]
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating'))
self.assertEqual(max_rating['max_rating'], 5)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id')
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3})
def test_ticket17424(self):
"""
Check that doing exclude() on a foreign model after annotate()
doesn't crash.
"""
all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk'))
annotated_books = Book.objects.order_by('pk').annotate(one=Count("id"))
# The value doesn't matter, we just need any negative
# constraint on a related model that's a noop.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
# Try to generate query tree
str(excluded_books.query)
self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Check that aggregation over sliced queryset works correctly.
"""
qs = Book.objects.all().order_by('-rating')[0:3]
vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating']
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
Check that subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE
or select_related() stuff.
"""
qs = Book.objects.all().select_for_update().order_by(
'pk').select_related('publisher').annotate(max_pk=Max('pk'))
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg('max_pk'))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]['sql'].lower()
self.assertNotIn('for update', qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
# If the backend needs to force an ordering we make sure it's
# the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r'order by (\w+)', qstr),
[', '.join(f[1][0] for f in forced_ordering).lower()]
)
else:
self.assertNotIn('order by', qstr)
self.assertEqual(qstr.count(' join '), 0)
def test_decimal_max_digits_has_no_effect(self):
Book.objects.all().delete()
a1 = Author.objects.first()
p1 = Publisher.objects.first()
thedate = timezone.now()
for i in range(10):
Book.objects.create(
isbn="abcde{}".format(i), name="none", pages=10, rating=4.0,
price=9999.98, contact=a1, publisher=p1, pubdate=thedate)
book = Book.objects.aggregate(price_sum=Sum('price'))
self.assertEqual(book['price_sum'], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
with six.assertRaisesRegex(self, TypeError, 'fail is not an aggregate expression'):
Book.objects.aggregate(fail=F('price'))
def test_nonfield_annotation(self):
book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
self.assertEqual(book.val, 2)
def test_missing_output_field_raises_error(self):
with six.assertRaisesRegex(self, FieldError, 'Cannot resolve expression type, unknown output_field'):
Book.objects.annotate(val=Max(2)).first()
def test_annotation_expressions(self):
authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name')
authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name')
for qs in (authors, authors2):
self.assertEqual(len(qs), 9)
self.assertQuerysetEqual(
qs, [
('Adrian Holovaty', 132),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 129),
('James Bennett', 63),
('Jeffrey Forcier', 128),
('Paul Bissex', 120),
('Peter Norvig', 103),
('Stuart Russell', 103),
('Wesley J. Chun', 176)
],
lambda a: (a.name, a.combined_ages)
)
def test_aggregation_expressions(self):
a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*'))
a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age'))
a3 = Author.objects.aggregate(av_age=Avg('age'))
self.assertEqual(a1, {'av_age': 37})
self.assertEqual(a2, {'av_age': 37})
self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)})
def test_avg_decimal_field(self):
v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price']
self.assertIsInstance(v, float)
self.assertEqual(v, Approximate(47.39, places=2))
def test_order_of_precedence(self):
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3)
self.assertEqual(p1, {'avg_price': Approximate(148.18, places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3)
self.assertEqual(p2, {'avg_price': Approximate(53.39, places=2)})
def test_combine_different_types(self):
with six.assertRaisesRegex(self, FieldError, 'Expression contains mixed types. You must set output_field'):
Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price')).get(pk=self.b4.pk)
b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=IntegerField())).get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=FloatField())).get(pk=self.b4.pk)
self.assertEqual(b2.sums, 383.69)
b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=DecimalField())).get(pk=self.b4.pk)
self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
def test_complex_aggregations_require_kwarg(self):
with six.assertRaisesRegex(self, TypeError, 'Complex annotations require an alias'):
Author.objects.annotate(Sum(F('age') + F('friends__age')))
with six.assertRaisesRegex(self, TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum('age') / Count('age'))
with six.assertRaisesRegex(self, TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum(1))
def test_aggregate_over_complex_annotation(self):
qs = Author.objects.annotate(
combined_ages=Sum(F('age') + F('friends__age')))
age = qs.aggregate(max_combined_age=Max('combined_ages'))
self.assertEqual(age['max_combined_age'], 176)
age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2)
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age=Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age'], 954)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age_doubled'], 954 * 2)
def test_values_annotation_with_expression(self):
# ensure the F() is promoted to the group by clause
qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['another_age'], 68)
qs = qs.annotate(friend_count=Count('friends'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['friend_count'], 2)
qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter(
name="Adrian Holovaty").order_by('-combined_age')
self.assertEqual(
list(qs), [
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 63
}
]
)
vals = qs.values('name', 'combined_age')
self.assertEqual(
list(vals), [
{
"name": 'Adrian Holovaty',
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"combined_age": 63
}
]
)
def test_annotate_values_aggregate(self):
alias_age = Author.objects.annotate(
age_alias=F('age')
).values(
'age_alias',
).aggregate(sum_age=Sum('age_alias'))
age = Author.objects.values('age').aggregate(sum_age=Sum('age'))
self.assertEqual(alias_age['sum_age'], age['sum_age'])
def test_annotate_over_annotate(self):
author = Author.objects.annotate(
age_alias=F('age')
).annotate(
sum_age=Sum('age_alias')
).get(name="Adrian Holovaty")
other_author = Author.objects.annotate(
sum_age=Sum('age')
).get(name="Adrian Holovaty")
self.assertEqual(author.sum_age, other_author.sum_age)
def test_annotated_aggregate_over_annotated_aggregate(self):
with six.assertRaisesRegex(self, FieldError, "Cannot compute Sum\('id__max'\): 'id__max' is an aggregate"):
Book.objects.annotate(Max('id')).annotate(Sum('id__max'))
def test_add_implementation(self):
class MySum(Sum):
pass
# test completely changing how the output is rendered
def lower_case_function_override(self, compiler, connection):
sql, params = compiler.compile(self.source_expressions[0])
substitutions = dict(function=self.function.lower(), expressions=sql)
substitutions.update(self.extra)
return self.template % substitutions, params
setattr(MySum, 'as_' + connection.vendor, lower_case_function_override)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test changing the dict and delegating
def lower_case_function_super(self, compiler, connection):
self.extra['function'] = self.function.lower()
return super(MySum, self).as_sql(compiler, connection)
setattr(MySum, 'as_' + connection.vendor, lower_case_function_super)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('sum('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
# test overriding all parts of the template
def be_evil(self, compiler, connection):
substitutions = dict(function='MAX', expressions='2')
substitutions.update(self.extra)
return self.template % substitutions, ()
setattr(MySum, 'as_' + connection.vendor, be_evil)
qs = Book.objects.annotate(
sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
)
self.assertEqual(str(qs.query).count('MAX('), 1)
b1 = qs.get(pk=self.b4.pk)
self.assertEqual(b1.sums, 2)
def test_complex_values_aggregation(self):
max_rating = Book.objects.values('rating').aggregate(
double_max_rating=Max('rating') + Max('rating'))
self.assertEqual(max_rating['double_max_rating'], 5 * 2)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id') + 5
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3 + 5})
def test_expression_on_aggregation(self):
# Create a plain expression
class Greatest(Func):
function = 'GREATEST'
def as_sqlite(self, compiler, connection):
return super(Greatest, self).as_sql(compiler, connection, function='MAX')
qs = Publisher.objects.annotate(
price_or_median=Greatest(Avg('book__rating'), Avg('book__price'))
).filter(price_or_median__gte=F('num_awards')).order_by('num_awards')
self.assertQuerysetEqual(
qs, [1, 3, 7, 9], lambda v: v.num_awards)
qs2 = Publisher.objects.annotate(
rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'),
output_field=FloatField())
).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards')
self.assertQuerysetEqual(
qs2, [1, 3], lambda v: v.num_awards)
@ignore_warnings(category=RemovedInDjango110Warning)
def test_backwards_compatibility(self):
from django.db.models.sql import aggregates as sql_aggregates
class SqlNewSum(sql_aggregates.Aggregate):
sql_function = 'SUM'
class NewSum(Aggregate):
name = 'Sum'
def add_to_query(self, query, alias, col, source, is_summary):
klass = SqlNewSum
aggregate = klass(
col, source=source, is_summary=is_summary, **self.extra)
query.annotations[alias] = aggregate
qs = Author.objects.values('name').annotate(another_age=NewSum('age') + F('age'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['another_age'], 68)
|
{
"content_hash": "74e72d23c2f38d45e51e9e21d0251b0b",
"timestamp": "",
"source": "github",
"line_count": 1089,
"max_line_length": 155,
"avg_line_length": 39.637281910009186,
"alnum_prop": 0.5392100081084211,
"repo_name": "adelton/django",
"id": "5b0bcb82bc015abdb970ed554e2fef834b5589c2",
"size": "43165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/aggregation/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43253"
},
{
"name": "HTML",
"bytes": "171790"
},
{
"name": "JavaScript",
"bytes": "105273"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11095192"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Target table.
"""
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Table
__docformat__ = "reStructuredText en"
__all__ = ['create_table']
def create_table(metadata, transcript_tbl, molecule_design_tbl):
"Table factory."
tbl = Table('target', metadata,
Column('target_id', Integer, primary_key=True),
Column('molecule_design_id', Integer,
ForeignKey(molecule_design_tbl.c.molecule_design_id),
nullable=False),
Column('transcript_id', Integer,
ForeignKey(transcript_tbl.c.transcript_id), nullable=False)
)
return tbl
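if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original module. The two stand-in
    # parent tables below exist only so the foreign keys in the factory resolve; in
    # TheLMA the real transcript and molecule_design tables come from their own
    # table factories.
    from sqlalchemy import MetaData
    metadata = MetaData()
    transcript_tbl = Table('transcript', metadata,
                           Column('transcript_id', Integer, primary_key=True))
    molecule_design_tbl = Table('molecule_design', metadata,
                                Column('molecule_design_id', Integer, primary_key=True))
    target_tbl = create_table(metadata, transcript_tbl, molecule_design_tbl)
    print(target_tbl.c.keys())  # ['target_id', 'molecule_design_id', 'transcript_id']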
|
{
"content_hash": "1fc665cea0d7d978c01a7834fa9c7241",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 80,
"avg_line_length": 32.148148148148145,
"alnum_prop": 0.6693548387096774,
"repo_name": "helixyte/TheLMA",
"id": "c21e56dfe6f9f876b42d6da2123e6a18e7e5cd8a",
"size": "868",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thelma/repositories/rdb/schema/tables/target.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3126"
},
{
"name": "Python",
"bytes": "3329729"
},
{
"name": "Shell",
"bytes": "3071"
}
],
"symlink_target": ""
}
|
import os
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
# To set up environment variables, see http://twil.io/secure
account_sid = os.environ['TWILIO_ACCOUNT_SID']
auth_token = os.environ['TWILIO_AUTH_TOKEN']
client = Client(account_sid, auth_token)
document = client.sync \
.services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.documents("MyFirstDocument") \
.fetch()
print(document.data)
|
{
"content_hash": "b9ca20ededc9b887fc5971bc0bcb8cc9",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 62,
"avg_line_length": 30.133333333333333,
"alnum_prop": 0.745575221238938,
"repo_name": "TwilioDevEd/api-snippets",
"id": "75335ec6a8f3982cf82c3ee01a46f6b8bdaef519",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sync/rest/documents/retrieve-document/retrieve-document.7.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "637161"
},
{
"name": "C++",
"bytes": "24856"
},
{
"name": "Go",
"bytes": "7217"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "912474"
},
{
"name": "JavaScript",
"bytes": "512877"
},
{
"name": "M",
"bytes": "147"
},
{
"name": "Objective-C",
"bytes": "53325"
},
{
"name": "PHP",
"bytes": "517186"
},
{
"name": "Python",
"bytes": "442184"
},
{
"name": "Ruby",
"bytes": "438928"
},
{
"name": "Shell",
"bytes": "3854"
},
{
"name": "Swift",
"bytes": "42345"
},
{
"name": "TypeScript",
"bytes": "16767"
}
],
"symlink_target": ""
}
|
import pytest
def raising_checker(value, system):
from snovault.auditor import AuditFailure
if not value.get('checker1'):
raise AuditFailure('testchecker', 'Missing checker1')
def returning_checker(value, system):
from snovault.auditor import AuditFailure
if not value.get('checker1'):
return AuditFailure('testchecker', 'Missing checker1')
def yielding_checker(value, system):
from snovault.auditor import AuditFailure
if not value.get('checker1'):
yield AuditFailure('testchecker', 'Missing checker1')
def has_condition1(value, system):
return value.get('condition1')
@pytest.fixture(params=[
raising_checker,
returning_checker,
yielding_checker,
])
def auditor(request):
from snovault.auditor import Auditor
auditor = Auditor()
auditor.add_audit_checker(request.param, 'test')
return auditor
@pytest.fixture
def auditor_conditions():
from snovault.auditor import Auditor
auditor = Auditor()
auditor.add_audit_checker(raising_checker, 'test', has_condition1)
return auditor
@pytest.fixture
def dummy_request(registry):
from pyramid.testing import DummyRequest
_embed = {}
request = DummyRequest(registry=registry, _embed=_embed, embed=lambda path: _embed[path])
return request
def test_audit_pass(auditor, dummy_request):
value = {'checker1': True}
dummy_request._embed['/foo/@@embedded'] = value
errors = auditor.audit(request=dummy_request, path='/foo/', types='test')
assert errors == []
def test_audit_failure(auditor, dummy_request):
value = {}
dummy_request._embed['/foo/@@embedded'] = value
error, = auditor.audit(request=dummy_request, path='/foo/', types='test')
assert error['detail'] == 'Missing checker1'
assert error['category'] == 'testchecker'
assert error['level'] == 0
assert error['path'] == '/foo/'
def test_audit_conditions(auditor_conditions, dummy_request):
value = {}
dummy_request._embed['/foo/@@embedded'] = value
assert auditor_conditions.audit(request=dummy_request, path='/foo/', types='test') == []
value = {'condition1': True}
dummy_request._embed['/foo/@@embedded'] = value
error, = auditor_conditions.audit(request=dummy_request, path='/foo/', types='test')
assert error['detail'] == 'Missing checker1'
assert error['category'] == 'testchecker'
assert error['level'] == 0
assert error['path'] == '/foo/'
def test_declarative_config(dummy_request):
from snovault.interfaces import AUDITOR
from pyramid.config import Configurator
config = Configurator()
config.include('snovault.config')
config.include('snovault.auditor')
config.include('.testing_auditor')
config.commit()
auditor = config.registry[AUDITOR]
value = {'condition1': True}
dummy_request._embed['/foo/@@embedded'] = value
error, = auditor.audit(request=dummy_request, path='/foo/', types='TestingLinkSource')
assert error['detail'] == 'Missing checker1'
assert error['category'] == 'testchecker'
assert error['level'] == 0
assert error['path'] == '/foo/'
def test_link_target_audit_fail(testapp):
target = {'uuid': '775795d3-4410-4114-836b-8eeecf1d0c2f', 'status': 'CHECK'}
testapp.post_json('/testing_link_target', target, status=201)
res = testapp.get('/%s/@@index-data' % target['uuid']).maybe_follow()
errors_dict = res.json['audit']
errors_list = []
for error_type in errors_dict:
errors_list.extend(errors_dict[error_type])
errors = [e for e in errors_list if e['name'] == 'testing_link_target_status']
error, = errors
assert error['detail'] == 'Missing reverse items'
assert error['category'] == 'status'
assert error['level'] == 0
assert error['path'] == res.json['object']['@id']
def test_link_target_audit_pass(testapp):
target = {'uuid': '775795d3-4410-4114-836b-8eeecf1d0c2f', 'status': 'CHECK'}
testapp.post_json('/testing_link_target', target, status=201)
source = {'uuid': '16157204-8c8f-4672-a1a4-14f4b8021fcd', 'target': target['uuid']}
testapp.post_json('/testing_link_source', source, status=201)
res = testapp.get('/%s/@@index-data' % target['uuid']).maybe_follow()
errors_dict = res.json['audit']
errors_list = []
for error_type in errors_dict:
errors_list.extend(errors_dict[error_type])
errors = [e for e in errors_list if e['name'] == 'testing_link_target_status']
assert errors == []
|
{
"content_hash": "8a63da2102aedd56b4d070c1f7ec08f9",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 93,
"avg_line_length": 34.55384615384615,
"alnum_prop": 0.6705253784505788,
"repo_name": "T2DREAM/t2dream-portal",
"id": "62a103caaa7a82c6ece2342236d65fbb0417948e",
"size": "4492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/encoded/tests/test_auditor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AngelScript",
"bytes": "741"
},
{
"name": "CSS",
"bytes": "2874"
},
{
"name": "Gherkin",
"bytes": "16776"
},
{
"name": "HTML",
"bytes": "373076"
},
{
"name": "JavaScript",
"bytes": "1320205"
},
{
"name": "Makefile",
"bytes": "106"
},
{
"name": "Python",
"bytes": "1567328"
},
{
"name": "SCSS",
"bytes": "336182"
},
{
"name": "Shell",
"bytes": "4199"
}
],
"symlink_target": ""
}
|
"""This is generic driver class implementation."""
from functools import partial
import re
import logging
import pexpect
from condoor.actions import a_send, a_connection_closed, a_stays_connected, a_unexpected_prompt, a_expected_prompt
from condoor.fsm import FSM
from condoor.exceptions import ConnectionError, CommandError, CommandSyntaxError, CommandTimeoutError
from condoor.utils import pattern_to_str
from condoor import pattern_manager
logger = logging.getLogger(__name__)
class Driver(object):
"""This is generic Driver class implementation."""
platform = 'generic'
inventory_cmd = None
users_cmd = None
target_prompt_components = ['prompt_dynamic']
prepare_terminal_session = ['terminal len 0']
families = {}
def __init__(self, device):
"""Initialize the Driver object."""
self.device = device
# FIXME: Do something with this, it's insane
self.prompt_re = pattern_manager.pattern(self.platform, 'prompt')
self.syntax_error_re = pattern_manager.pattern(self.platform, 'syntax_error')
self.connection_closed_re = pattern_manager.pattern(self.platform, 'connection_closed')
self.press_return_re = pattern_manager.pattern(self.platform, 'press_return')
self.more_re = pattern_manager.pattern(self.platform, 'more')
self.rommon_re = pattern_manager.pattern(self.platform, 'rommon')
self.buffer_overflow_re = pattern_manager.pattern(self.platform, 'buffer_overflow')
self.username_re = pattern_manager.pattern(self.platform, 'username')
self.password_re = pattern_manager.pattern(self.platform, 'password')
self.authentication_error_re = pattern_manager.pattern(self.platform, 'authentication_error')
self.unable_to_connect_re = pattern_manager.pattern(self.platform, 'unable_to_connect')
self.timeout_re = pattern_manager.pattern(self.platform, 'timeout')
self.standby_re = pattern_manager.pattern(self.platform, 'standby')
self.pid2platform_re = pattern_manager.pattern(self.platform, 'pid2platform')
self.platform_re = pattern_manager.pattern(self.platform, 'platform', compiled=False)
self.version_re = pattern_manager.pattern(self.platform, 'version', compiled=False)
self.vty_re = pattern_manager.pattern(self.platform, 'vty')
self.console_re = pattern_manager.pattern(self.platform, 'console')
def __repr__(self):
"""Return the string representation of the driver class."""
return str(self.platform)
def get_version_text(self):
"""Return the version information from the device."""
try:
version_text = self.device.send("show version brief", timeout=120)
except CommandError:
# IOS Hack - need to check if show version brief is supported on IOS/IOS XE
version_text = self.device.send("show version", timeout=120)
return version_text
def get_inventory_text(self):
"""Return the inventory information from the device."""
inventory_text = None
if self.inventory_cmd:
try:
inventory_text = self.device.send(self.inventory_cmd, timeout=120)
logger.debug('Inventory collected')
except CommandError:
logger.debug('Unable to collect inventory')
else:
logger.debug('No inventory command for {}'.format(self.platform))
return inventory_text
def get_hostname_text(self): # pylint: disable=no-self-use
"""Return the hostname information from the device."""
return None
def get_users_text(self):
"""Return the users logged in information from the device."""
users_text = None
if self.users_cmd:
try:
users_text = self.device.send(self.users_cmd, timeout=60)
except CommandError:
logger.debug('Unable to collect connected users information')
else:
logger.debug('No users command for {}'.format(self.platform))
return users_text
def get_os_type(self, version_text): # pylint: disable=no-self-use
"""Return the OS type information from the device."""
os_type = None
if version_text is None:
return os_type
match = re.search("(XR|XE|NX-OS)", version_text)
if match:
os_type = match.group(1)
else:
os_type = 'IOS'
if os_type == "XR":
match = re.search("Build Information", version_text)
if match:
os_type = "eXR"
match = re.search("XR Admin Software", version_text)
if match:
os_type = "Calvados"
return os_type
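    # Illustrative note on the classification above: a "show version" text containing
    # "XE" or "NX-OS" is reported as such; one containing "XR" is refined to "eXR" when
    # "Build Information" is present and to "Calvados" when "XR Admin Software" is
    # present; anything matching none of these falls back to "IOS".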
def get_os_version(self, version_text):
"""Return the OS version information from the device."""
os_version = None
if version_text is None:
return os_version
match = re.search(self.version_re, version_text, re.MULTILINE)
if match:
os_version = match.group(1)
return os_version
def get_hw_family(self, version_text):
"""Return the HW family information from the device."""
family = None
if version_text is None:
return family
match = re.search(self.platform_re, version_text, re.MULTILINE)
if match:
logger.debug("Platform string: {}".format(match.group()))
family = match.group(1)
for key, value in self.families.items():
if family.startswith(key):
family = value
break
else:
logger.debug("Platform string not present. Refer to CSCux08958")
return family
def get_hw_platform(self, udi):
"""Return th HW platform information from the device."""
platform = None
try:
pid = udi['pid']
match = re.search(self.pid2platform_re, pid)
if match:
platform = match.group(1)
except KeyError:
pass
return platform
def is_console(self, users_text):
"""Return if device is connected over console."""
if users_text is None:
logger.debug("Console information not collected")
return None
for line in users_text.split('\n'):
if '*' in line:
match = re.search(self.vty_re, line)
if match:
logger.debug("Detected connection to vty")
return False
else:
match = re.search(self.console_re, line)
if match:
logger.debug("Detected connection to console")
return True
logger.debug("Connection port unknown")
return None
def update_driver(self, prompt):
"""Update driver based on the prompt."""
logger.debug(prompt)
platform = pattern_manager.platform(prompt)
if platform:
logger.debug('{} -> {}'.format(self.platform, platform))
return platform
else:
logger.debug('No update: {}'.format(self.platform))
return self.platform
def wait_for_string(self, expected_string, timeout=60):
"""Wait for string FSM."""
# 0 1 2 3
events = [self.syntax_error_re, self.connection_closed_re, expected_string, self.press_return_re,
# 4 5 6 7
self.more_re, pexpect.TIMEOUT, pexpect.EOF, self.buffer_overflow_re]
# add detected prompts chain
events += self.device.get_previous_prompts() # without target prompt
logger.debug("Expecting: {}".format(pattern_to_str(expected_string)))
transitions = [
(self.syntax_error_re, [0], -1, CommandSyntaxError("Command unknown", self.device.hostname), 0),
(self.connection_closed_re, [0], 1, a_connection_closed, 10),
(pexpect.TIMEOUT, [0], -1, CommandTimeoutError("Timeout waiting for prompt", self.device.hostname), 0),
(pexpect.EOF, [0, 1], -1, ConnectionError("Unexpected device disconnect", self.device.hostname), 0),
(self.more_re, [0], 0, partial(a_send, " "), 10),
(expected_string, [0, 1], -1, a_expected_prompt, 0),
(self.press_return_re, [0], -1, a_stays_connected, 0),
# TODO: Customize in XR driver
(self.buffer_overflow_re, [0], -1, CommandSyntaxError("Command too long", self.device.hostname), 0)
]
for prompt in self.device.get_previous_prompts():
transitions.append((prompt, [0, 1], 0, a_unexpected_prompt, 0))
fsm = FSM("WAIT-4-STRING", self.device, events, transitions, timeout=timeout)
return fsm.run()
# def send_xml(self, command, timeout=60):
# """
# Handle error i.e.
# ERROR: 0x24319600 'XML-TTY' detected the 'informational' condition
# 'The XML TTY Agent has not yet been started.
# Check that the configuration 'xml agent tty' has been committed.'
# """
# self._debug("Starting XML TTY Agent")
# result = self.send("xml")
# self._info("XML TTY Agent started")
#
# result = self.send(command, timeout=timeout)
# self.ctrl.sendcontrol('c')
# return result
# def netconf(self, command):
# """
# Handle error i.e.
# ERROR: 0x24319600 'XML-TTY' detected the 'informational' condition
# 'The XML TTY Agent has not yet been started.
# Check that the configuration 'xml agent tty' has been committed.'
# """
# self._debug("Starting XML TTY Agent")
# result = self.send("netconf", wait_for_string=']]>]]>')
# self._info("XML TTY Agent started")
#
# self.ctrl.send(command)
# self.ctrl.send("\r\n")
# self.ctrl.expect("]]>]]>")
# result = self.ctrl.before
# self.ctrl.sendcontrol('c')
# self.send()
# return result
def enable(self, enable_password):
"""Change the device mode to privileged.
        If the device does not support privileged mode, an informational
        message is posted to the log instead.
        Args:
            enable_password (str): The privileged mode password. This is an optional parameter. If the password is
                not provided but is required, the password from the url will be used. Refer to :class:`condoor.Connection`
"""
logger.info("Privileged mode not supported on {} platform".format(self.platform))
def reload(self, reload_timeout=300, save_config=True):
"""Reload the device and waits for device to boot up.
It posts the informational message to the log if not implemented by device driver.
"""
logger.info("Reload not implemented on {} platform".format(self.platform))
def after_connect(self):
"""Execute right after connecting to the device."""
pass
def base_prompt(self, prompt):
"""Extract the base prompt pattern."""
if prompt is None:
return None
if not self.device.is_target:
return prompt
pattern = pattern_manager.pattern(self.platform, "prompt_dynamic", compiled=False)
pattern = pattern.format(prompt="(?P<prompt>.*?)")
result = re.search(pattern, prompt)
if result:
base = result.group("prompt") + "#"
logger.debug("base prompt: {}".format(base))
return base
else:
logger.error("Unable to extract the base prompt")
return prompt
def make_dynamic_prompt(self, prompt):
"""Extend prompt with flexible mode handling regexp."""
patterns = [pattern_manager.pattern(
self.platform, pattern_name, compiled=False) for pattern_name in self.target_prompt_components]
patterns_re = "|".join(patterns).format(prompt=re.escape(prompt[:-1]))
try:
prompt_re = re.compile(patterns_re)
except re.error as e: # pylint: disable=invalid-name
raise RuntimeError("Pattern compile error: {} ({}:{})".format(e.message, self.platform, patterns_re))
logger.debug("Platform: {} -> Dynamic prompt: '{}'".format(self.platform, prompt_re.pattern))
return prompt_re
def update_config_mode(self, prompt): # pylint: disable=no-self-use
"""Update config mode based on the prompt analysis."""
if 'config' in prompt:
mode = 'config'
elif 'admin' in prompt:
mode = 'admin'
else:
mode = 'global'
logger.debug("Mode: {}".format(mode))
return mode
def update_hostname(self, prompt):
"""Update the hostname based on the prompt analusis."""
result = re.search(self.prompt_re, prompt)
if result:
hostname = result.group('hostname')
logger.debug("Hostname detected: {}".format(hostname))
else:
hostname = self.device.hostname
logger.debug("Hostname not set: {}".format(prompt))
return hostname
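# Illustrative sketch, not part of condoor: concrete platform drivers subclass the
# generic Driver and mostly just override the class-level knobs below; version and
# inventory parsing and the FSM handling are inherited. The command strings and the
# family map here are invented example values, not condoor's real per-platform settings.
class _ExamplePlatformDriver(Driver):
    platform = 'generic'
    inventory_cmd = 'show inventory'
    users_cmd = 'show users'
    target_prompt_components = ['prompt_dynamic']
    prepare_terminal_session = ['terminal len 0', 'terminal width 0']
    families = {'EXAMPLE': 'EXAMPLE-FAMILY'}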
|
{
"content_hash": "32badd7bf8dc916030c4a5ebfc037760",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 115,
"avg_line_length": 40.43333333333333,
"alnum_prop": 0.5952934122760998,
"repo_name": "kstaniek/condoor-ng",
"id": "c11e3b2585177aa4a58e37972f0390fe3f9ea886",
"size": "13343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "condoor/drivers/generic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "239845"
}
],
"symlink_target": ""
}
|
"""oAuth2 functions and classes for Geocaching API integration."""
from __future__ import annotations
from typing import Any
from homeassistant.components.application_credentials import (
AuthImplementation,
AuthorizationServer,
ClientCredential,
)
from homeassistant.core import HomeAssistant
from .const import ENVIRONMENT, ENVIRONMENT_URLS
class GeocachingOAuth2Implementation(AuthImplementation):
"""Local OAuth2 implementation for Geocaching."""
def __init__(
self,
hass: HomeAssistant,
auth_domain: str,
credential: ClientCredential,
) -> None:
"""Local Geocaching Oauth Implementation."""
super().__init__(
hass=hass,
auth_domain=auth_domain,
credential=credential,
authorization_server=AuthorizationServer(
authorize_url=ENVIRONMENT_URLS[ENVIRONMENT]["authorize_url"],
token_url=ENVIRONMENT_URLS[ENVIRONMENT]["token_url"],
),
)
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
return {"scope": "*", "response_type": "code"}
async def async_resolve_external_data(self, external_data: Any) -> dict:
"""Initialize local Geocaching API auth implementation."""
redirect_uri = external_data["state"]["redirect_uri"]
data = {
"grant_type": "authorization_code",
"code": external_data["code"],
"redirect_uri": redirect_uri,
}
token = await self._token_request(data)
# Store the redirect_uri (Needed for refreshing token, but not according to oAuth2 spec!)
token["redirect_uri"] = redirect_uri
return token
async def _async_refresh_token(self, token: dict) -> dict:
"""Refresh tokens."""
data = {
"client_id": self.client_id,
"client_secret": self.client_secret,
"grant_type": "refresh_token",
"refresh_token": token["refresh_token"],
# Add previously stored redirect_uri (Mandatory, but not according to oAuth2 spec!)
"redirect_uri": token["redirect_uri"],
}
new_token = await self._token_request(data)
return {**token, **new_token}
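# Illustrative wiring sketch, not part of the integration: roughly how the class above
# gets constructed once application credentials are configured. The domain string and
# credential values are placeholders; in Home Assistant the instance is normally created
# for you by the application_credentials machinery rather than by hand.
#
#   implementation = GeocachingOAuth2Implementation(
#       hass,
#       auth_domain="geocaching",
#       credential=ClientCredential("example-client-id", "example-client-secret"),
#   )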
|
{
"content_hash": "ca83470d630b6993b2c5dee7a8b12567",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 97,
"avg_line_length": 35.22727272727273,
"alnum_prop": 0.6163440860215054,
"repo_name": "w1ll1am23/home-assistant",
"id": "848c4fce66cd8000b80e7024a120441003404254",
"size": "2325",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/geocaching/oauth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""Use the HTMLParser library to parse HTML files that aren't too bad."""
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__all__ = [
'HTMLParserTreeBuilder',
]
from html.parser import HTMLParser
try:
from html.parser import HTMLParseError
except ImportError as e:
# HTMLParseError is removed in Python 3.5. Since it can never be
# thrown in 3.5, we can just define our own class as a placeholder.
class HTMLParseError(Exception):
pass
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3
CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def __init__(self, *args, **kwargs):
HTMLParser.__init__(self, *args, **kwargs)
# Keep a list of empty-element tags that were encountered
# without an explicit closing tag. If we encounter a closing tag
# of this type, we'll associate it with one of those entries.
#
# This isn't a stack because we don't care about the
# order. It's a list of closing tags we've already handled and
# will ignore, assuming they ever show up.
self.already_closed_empty_element = []
def handle_startendtag(self, name, attrs):
# This is only called when the markup looks like
# <tag/>.
# is_startend() tells handle_starttag not to close the tag
# just because its name matches a known empty-element tag. We
# know that this is an empty-element tag and we want to call
# handle_endtag ourselves.
tag = self.handle_starttag(name, attrs, handle_empty_element=False)
self.handle_endtag(name)
def handle_starttag(self, name, attrs, handle_empty_element=True):
# XXX namespace
attr_dict = {}
for key, value in attrs:
# Change None attribute values to the empty string
# for consistency with the other tree builders.
if value is None:
value = ''
attr_dict[key] = value
attrvalue = '""'
#print "START", name
tag = self.soup.handle_starttag(name, None, None, attr_dict)
if tag and tag.is_empty_element and handle_empty_element:
# Unlike other parsers, html.parser doesn't send separate end tag
# events for empty-element tags. (It's handled in
# handle_startendtag, but only if the original markup looked like
# <tag/>.)
#
# So we need to call handle_endtag() ourselves. Since we
# know the start event is identical to the end event, we
# don't want handle_endtag() to cross off any previous end
# events for tags of this name.
self.handle_endtag(name, check_already_closed=False)
# But we might encounter an explicit closing tag for this tag
# later on. If so, we want to ignore it.
self.already_closed_empty_element.append(name)
def handle_endtag(self, name, check_already_closed=True):
#print "END", name
if check_already_closed and name in self.already_closed_empty_element:
# This is a redundant end tag for an empty-element tag.
# We've already called handle_endtag() for it, so just
# check it off the list.
# print "ALREADY CLOSED", name
self.already_closed_empty_element.remove(name)
else:
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed in all supported versions.
# http://bugs.python.org/issue13633
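        # For illustration: a charref name of 'x41' or 'X41' is hexadecimal
        # (0x41 -> 'A'), while a plain '65' is decimal (also 'A').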
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
elif name.startswith('X'):
real_name = int(name.lstrip('X'), 16)
else:
real_name = int(name)
try:
data = chr(real_name)
except (ValueError, OverflowError) as e:
data = "\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
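        # For illustration: an entity name of 'amp' resolves to '&', while an
        # unrecognized name such as 'bogus' is passed through as '&bogus;'.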
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
elif data == 'DOCTYPE':
# i.e. "<!DOCTYPE>"
data = ''
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
picklable = True
NAME = HTMLPARSER
features = [NAME, HTML, STRICT]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED:
kwargs['strict'] = False
if CONSTRUCTOR_TAKES_CONVERT_CHARREFS:
kwargs['convert_charrefs'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None, exclude_encodings=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, str):
yield (markup, None, None, False)
return
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True,
exclude_encodings=exclude_encodings)
yield (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
try:
parser.feed(markup)
except HTMLParseError as e:
warnings.warn(RuntimeWarning(
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
raise e
parser.already_closed_empty_element = []
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
|
{
"content_hash": "5d38bd03feb5c51230fb613e03a7d9f3",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 318,
"avg_line_length": 36.97133757961783,
"alnum_prop": 0.584460332500646,
"repo_name": "williamfeng323/py-web",
"id": "907d355b65926f31254c53eb7ebc2a28042f436b",
"size": "11609",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "flask/lib/python3.6/site-packages/bs4/builder/_htmlparser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "39957"
},
{
"name": "CSS",
"bytes": "6270"
},
{
"name": "HTML",
"bytes": "6046"
},
{
"name": "JavaScript",
"bytes": "6264"
},
{
"name": "Mako",
"bytes": "10018"
},
{
"name": "Python",
"bytes": "15554131"
},
{
"name": "Shell",
"bytes": "6007"
}
],
"symlink_target": ""
}
|
import os
import sys
from xml.dom import minidom
from termcolor import colored
def find_root_maven_dir_from_current_dir():
cwd = os.getcwd()
dirs = cwd.split('/')
highest_dir_with_pom = None
if 'pom.xml' in os.listdir(cwd):
highest_dir_with_pom = cwd
#print 'ABOUT TO LOOP'
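    # Illustrative walk (hypothetical paths): for cwd '/home/dev/proj/web/src'
    # with a pom.xml in both '/home/dev/proj' and '/home/dev/proj/web', the
    # loop below keeps reassigning until it returns the highest directory,
    # '/home/dev/proj'.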
for i in range(1, len(dirs) - 1):
end_index = -1 * i
current_dir_path = '/'.join(dirs[0:end_index])
files_in_current_dir = os.listdir(current_dir_path)
if 'pom.xml' in files_in_current_dir:
highest_dir_with_pom = current_dir_path
if highest_dir_with_pom is None:
print colored("Doesn't look like you're in a Jive project", 'red')
sys.exit(1)
return highest_dir_with_pom
def find_jdbc_string():
highest_dir_with_pom = find_root_maven_dir_from_current_dir()
if highest_dir_with_pom is None:
print colored("DIDN'T FIND A pom.xml FILE", 'red')
else:
#print "highest_dir_with_pom: " + highest_dir_with_pom
startup_file = highest_dir_with_pom + "/target/jiveHome/jive_startup.xml"
if not os.path.isfile(startup_file):
print colored("Could not find jive_startup.xml.", 'red')
print colored("Expected " + startup_file, 'red')
print colored("You might not be in a Jive project.", 'red')
sys.exit(1)
jive_startup_xml = minidom.parse(startup_file)
jdbc_string_node = jive_startup_xml.getElementsByTagName("serverURL")[0]
jdbc_string = jdbc_string_node.firstChild.data
#print "jdbc_string: " + jdbc_string
return jdbc_string
def find_database_name():
jdbc_string = find_jdbc_string()
if jdbc_string:
return jdbc_string.split('/')[-1]
|
{
"content_hash": "7c67f5afc6d4af32993fbe9b4082e920",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 81,
"avg_line_length": 28.142857142857142,
"alnum_prop": 0.6153412295544275,
"repo_name": "digitaldarwin/jiver",
"id": "9e98c1f8ddf038d85274c2035b565569e14bef80",
"size": "1775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jiver/maven_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46308"
},
{
"name": "Shell",
"bytes": "2850"
}
],
"symlink_target": ""
}
|
class Tile:
def __init__(self, unk1Instance):
self.unk1Instance = unk1Instance
def Draw(self, camera):
[translation, zoom] = camera.GetTransform()
|
{
"content_hash": "ef23d5cde76a5f0bf82653c489eb5eaa",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 51,
"avg_line_length": 24.714285714285715,
"alnum_prop": 0.6358381502890174,
"repo_name": "simply-jos/birth-of-the-toolkit",
"id": "0407d1c5dd0cac1545715b36e9c321f40c669c03",
"size": "173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "REScripts/TSCBViewer/BoTWHeightmap/tile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "9939"
},
{
"name": "Python",
"bytes": "23841"
}
],
"symlink_target": ""
}
|
import unittest
from unittest.mock import Mock, patch
from airflow import models
from airflow.configuration import load_test_config
from airflow.models.xcom import MAX_XCOM_SIZE
from airflow.operators.google_api_to_s3_transfer import GoogleApiToS3Transfer
from airflow.utils import db
class TestGoogleApiToS3Transfer(unittest.TestCase):
def setUp(self):
load_test_config()
db.merge_conn(
models.Connection(
conn_id='google_test',
host='google',
schema='refresh_token',
login='client_id',
password='client_secret'
)
)
db.merge_conn(
models.Connection(
conn_id='s3_test',
conn_type='s3',
schema='test',
extra='{"aws_access_key_id": "aws_access_key_id", "aws_secret_access_key":'
' "aws_secret_access_key"}'
)
)
self.kwargs = {
'gcp_conn_id': 'google_test',
'google_api_service_name': 'test_service',
'google_api_service_version': 'v3',
'google_api_endpoint_path': 'analyticsreporting.reports.batchGet',
'google_api_endpoint_params': {},
'google_api_pagination': False,
'google_api_num_retries': 0,
'aws_conn_id': 's3_test',
's3_destination_key': 'test/google_api_to_s3_test.csv',
's3_overwrite': True,
'task_id': 'task_id',
'dag': None
}
@patch('airflow.operators.google_api_to_s3_transfer.GoogleDiscoveryApiHook.query')
@patch('airflow.operators.google_api_to_s3_transfer.S3Hook.load_string')
@patch('airflow.operators.google_api_to_s3_transfer.json.dumps')
def test_execute(self, mock_json_dumps, mock_s3_hook_load_string, mock_google_api_hook_query):
context = {'task_instance': Mock()}
GoogleApiToS3Transfer(**self.kwargs).execute(context)
mock_google_api_hook_query.assert_called_once_with(
endpoint=self.kwargs['google_api_endpoint_path'],
data=self.kwargs['google_api_endpoint_params'],
paginate=self.kwargs['google_api_pagination'],
num_retries=self.kwargs['google_api_num_retries']
)
mock_json_dumps.assert_called_once_with(mock_google_api_hook_query.return_value)
mock_s3_hook_load_string.assert_called_once_with(
string_data=mock_json_dumps.return_value,
key=self.kwargs['s3_destination_key'],
replace=self.kwargs['s3_overwrite']
)
context['task_instance'].xcom_pull.assert_not_called()
context['task_instance'].xcom_push.assert_not_called()
@patch('airflow.operators.google_api_to_s3_transfer.GoogleDiscoveryApiHook.query')
@patch('airflow.operators.google_api_to_s3_transfer.S3Hook.load_string')
@patch('airflow.operators.google_api_to_s3_transfer.json.dumps')
def test_execute_with_xcom(self, mock_json_dumps, mock_s3_hook_load_string, mock_google_api_hook_query):
context = {'task_instance': Mock()}
xcom_kwargs = {
'google_api_response_via_xcom': 'response',
'google_api_endpoint_params_via_xcom': 'params',
'google_api_endpoint_params_via_xcom_task_ids': 'params',
}
context['task_instance'].xcom_pull.return_value = {}
GoogleApiToS3Transfer(**self.kwargs, **xcom_kwargs).execute(context)
mock_google_api_hook_query.assert_called_once_with(
endpoint=self.kwargs['google_api_endpoint_path'],
data=self.kwargs['google_api_endpoint_params'],
paginate=self.kwargs['google_api_pagination'],
num_retries=self.kwargs['google_api_num_retries']
)
mock_json_dumps.assert_called_once_with(mock_google_api_hook_query.return_value)
mock_s3_hook_load_string.assert_called_once_with(
string_data=mock_json_dumps.return_value,
key=self.kwargs['s3_destination_key'],
replace=self.kwargs['s3_overwrite']
)
context['task_instance'].xcom_pull.assert_called_once_with(
task_ids=xcom_kwargs['google_api_endpoint_params_via_xcom_task_ids'],
key=xcom_kwargs['google_api_endpoint_params_via_xcom']
)
context['task_instance'].xcom_push.assert_called_once_with(
key=xcom_kwargs['google_api_response_via_xcom'],
value=mock_google_api_hook_query.return_value
)
@patch('airflow.operators.google_api_to_s3_transfer.GoogleDiscoveryApiHook.query')
@patch('airflow.operators.google_api_to_s3_transfer.S3Hook.load_string')
@patch('airflow.operators.google_api_to_s3_transfer.json.dumps')
@patch('airflow.operators.google_api_to_s3_transfer.sys.getsizeof', return_value=MAX_XCOM_SIZE)
def test_execute_with_xcom_exceeded_max_xcom_size(
self,
mock_sys_getsizeof,
mock_json_dumps,
mock_s3_hook_load_string,
mock_google_api_hook_query
):
context = {'task_instance': Mock()}
xcom_kwargs = {
'google_api_response_via_xcom': 'response',
'google_api_endpoint_params_via_xcom': 'params',
'google_api_endpoint_params_via_xcom_task_ids': 'params',
}
context['task_instance'].xcom_pull.return_value = {}
self.assertRaises(RuntimeError, GoogleApiToS3Transfer(**self.kwargs, **xcom_kwargs).execute, context)
mock_google_api_hook_query.assert_called_once_with(
endpoint=self.kwargs['google_api_endpoint_path'],
data=self.kwargs['google_api_endpoint_params'],
paginate=self.kwargs['google_api_pagination'],
num_retries=self.kwargs['google_api_num_retries']
)
mock_json_dumps.assert_called_once_with(mock_google_api_hook_query.return_value)
mock_s3_hook_load_string.assert_called_once_with(
string_data=mock_json_dumps.return_value,
key=self.kwargs['s3_destination_key'],
replace=self.kwargs['s3_overwrite']
)
context['task_instance'].xcom_pull.assert_called_once_with(
task_ids=xcom_kwargs['google_api_endpoint_params_via_xcom_task_ids'],
key=xcom_kwargs['google_api_endpoint_params_via_xcom']
)
context['task_instance'].xcom_push.assert_not_called()
mock_sys_getsizeof.assert_called_once_with(mock_google_api_hook_query.return_value)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "4806f0a206e986ca2d67a72d04fa9452",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 109,
"avg_line_length": 43.79333333333334,
"alnum_prop": 0.6227736337342061,
"repo_name": "Fokko/incubator-airflow",
"id": "b942a2223ca543ffd71fc51b716d87db5624613d",
"size": "7382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/operators/test_google_api_to_s3_transfer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "14170"
},
{
"name": "HTML",
"bytes": "145596"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "8787104"
},
{
"name": "Shell",
"bytes": "187296"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
from anki.template.template import Template
from anki.template.view import View
def render(template, context=None, **kwargs):
context = context and context.copy() or {}
context.update(kwargs)
return Template(template, context).render()
|
{
"content_hash": "173ad696c645f5c5d324a955bd7fd3d9",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 35.57142857142857,
"alnum_prop": 0.7469879518072289,
"repo_name": "jlitven/vexer",
"id": "955518123d9e1c05238fb90da73c8df320da379d",
"size": "249",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "src/anki/template/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "761396"
}
],
"symlink_target": ""
}
|
import os
import re
import fnmatch
import glob
import time
import logging
import mimetypes
import subprocess
import textwrap
import uuid
from io import StringIO
from pathlib import Path
from datetime import date
import warnings
import jinja2
from ruamel.yaml import YAML
try:
import github3
_have_github3 = True
except ImportError:
github3 = object
_have_github3 = False
try:
import pygit2
except ImportError:
PygitRemoteCallbacks = object
GitError = Exception
else:
PygitRemoteCallbacks = pygit2.RemoteCallbacks
GitError = pygit2.GitError
from ..utils.source import ArrowSources
for pkg in ["requests", "urllib3", "github3"]:
logging.getLogger(pkg).setLevel(logging.WARNING)
logger = logging.getLogger("crossbow")
class CrossbowError(Exception):
pass
def _flatten(mapping):
"""Converts a hierarchical mapping to a flat dictionary"""
result = {}
for k, v in mapping.items():
if isinstance(v, dict):
for ik, iv in _flatten(v).items():
ik = ik if isinstance(ik, tuple) else (ik,)
result[(k,) + ik] = iv
elif isinstance(v, list):
for ik, iv in enumerate(_flatten(v)):
ik = ik if isinstance(ik, tuple) else (ik,)
result[(k,) + ik] = iv
else:
result[(k,)] = v
return result
def _unflatten(mapping):
"""Converts a flat tuple => object mapping to hierarchical one"""
result = {}
for path, value in mapping.items():
parents, leaf = path[:-1], path[-1]
# create the hierarchy until we reach the leaf value
temp = result
for parent in parents:
temp.setdefault(parent, {})
temp = temp[parent]
# set the leaf value
temp[leaf] = value
return result
def _unflatten_tree(files):
"""Converts a flat path => object mapping to a hierarchical directories
Input:
{
'path/to/file.a': a_content,
'path/to/file.b': b_content,
'path/file.c': c_content
}
Output:
{
'path': {
'to': {
'file.a': a_content,
'file.b': b_content
},
'file.c': c_content
}
}
"""
files = {tuple(k.split('/')): v for k, v in files.items()}
return _unflatten(files)
def _render_jinja_template(searchpath, template, params):
def format_all(items, pattern):
return [pattern.format(item) for item in items]
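    # format_all is exposed as a custom Jinja filter below; e.g. in a template
    # (hypothetical values): {{ ['a', 'b'] | format_all('--opt {}') }}
    # renders as ['--opt a', '--opt b'].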
loader = jinja2.FileSystemLoader(searchpath)
env = jinja2.Environment(loader=loader, trim_blocks=True,
lstrip_blocks=True,
undefined=jinja2.StrictUndefined)
env.filters['format_all'] = format_all
template = env.get_template(template)
return template.render(**params)
# configurations for setting up branch skipping
# - appveyor has a feature to skip builds without an appveyor.yml
# - travis reads from the default branch and applies the rules
# - circle requires the configuration to be present on all branch, even ones
# that are configured to be skipped
# - azure skips branches without azure-pipelines.yml by default
# - github skips branches without .github/workflows/ by default
_default_travis_yml = """
branches:
only:
- master
- /.*-travis-.*/
os: linux
dist: trusty
language: generic
"""
_default_circle_yml = """
version: 2
jobs:
build:
machine: true
workflows:
version: 2
build:
jobs:
- build:
filters:
branches:
only:
- /.*-circle-.*/
"""
_default_tree = {
'.travis.yml': _default_travis_yml,
'.circleci/config.yml': _default_circle_yml
}
class GitRemoteCallbacks(PygitRemoteCallbacks):
def __init__(self, token):
self.token = token
self.attempts = 0
super().__init__()
def push_update_reference(self, refname, message):
pass
def update_tips(self, refname, old, new):
pass
def credentials(self, url, username_from_url, allowed_types):
        # it's a libgit2 bug that it infinitely retries the authentication
self.attempts += 1
if self.attempts >= 5:
# pygit2 doesn't propagate the exception properly
msg = 'Wrong oauth personal access token'
print(msg)
raise CrossbowError(msg)
if (allowed_types &
pygit2.credentials.GIT_CREDENTIAL_USERPASS_PLAINTEXT):
return pygit2.UserPass(self.token, 'x-oauth-basic')
else:
return None
def _git_ssh_to_https(url):
return url.replace('git@github.com:', 'https://github.com/')
def _parse_github_user_repo(remote_url):
# TODO: use a proper URL parser instead?
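    # Illustrative matches (hypothetical URLs):
    #   'https://github.com/apache/arrow.git' -> ('apache', 'arrow')
    #   'apache/arrow'                        -> ('apache', 'arrow')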
m = re.match(r'.*\/([^\/]+)\/([^\/\.]+)(\.git|/)?$', remote_url)
if m is None:
# Perhaps it's simply "username/reponame"?
m = re.match(r'^(\w+)/(\w+)$', remote_url)
if m is None:
raise CrossbowError(
f"Unable to parse the github owner and repository from the "
f"repository's remote url {remote_url!r}"
)
user, repo = m.group(1), m.group(2)
return user, repo
class Repo:
"""
Base class for interaction with local git repositories
A high level wrapper used for both reading revision information from
arrow's repository and pushing continuous integration tasks to the queue
repository.
Parameters
----------
require_https : boolean, default False
Raise exception for SSH origin URLs
"""
def __init__(self, path, github_token=None, remote_url=None,
require_https=False):
self.path = Path(path)
self.github_token = github_token
self.require_https = require_https
self._remote_url = remote_url
self._pygit_repo = None
self._github_repo = None # set by as_github_repo()
self._updated_refs = []
def __str__(self):
tpl = textwrap.dedent('''
Repo: {remote}@{branch}
Commit: {head}
''')
return tpl.format(
remote=self.remote_url,
branch=self.branch.branch_name,
head=self.head
)
@property
def repo(self):
if self._pygit_repo is None:
self._pygit_repo = pygit2.Repository(str(self.path))
return self._pygit_repo
@property
def origin(self):
remote = self.repo.remotes['origin']
if self.require_https and remote.url.startswith('git@github.com'):
raise CrossbowError("Change SSH origin URL to HTTPS to use "
"Crossbow: {}".format(remote.url))
return remote
def fetch(self, retry=3):
refspec = '+refs/heads/*:refs/remotes/origin/*'
attempt = 1
while True:
try:
self.origin.fetch([refspec])
break
except GitError as e:
if retry and attempt < retry:
attempt += 1
else:
raise e
def push(self, refs=None, github_token=None):
github_token = github_token or self.github_token
if github_token is None:
raise RuntimeError(
'Could not determine GitHub token. Please set the '
'CROSSBOW_GITHUB_TOKEN environment variable to a '
'valid GitHub access token or pass one to --github-token.'
)
callbacks = GitRemoteCallbacks(github_token)
refs = refs or []
try:
self.origin.push(refs + self._updated_refs, callbacks=callbacks)
except pygit2.GitError:
raise RuntimeError('Failed to push updated references, '
'potentially because of credential issues: {}'
.format(self._updated_refs))
else:
            self._updated_refs = []
@property
def head(self):
"""Currently checked out commit's sha"""
return self.repo.head
@property
def branch(self):
"""Currently checked out branch"""
try:
return self.repo.branches[self.repo.head.shorthand]
except KeyError:
raise CrossbowError(
'Cannot determine the current branch of the Arrow repository '
'to clone or push to, perhaps it is in detached HEAD state. '
'Please checkout a branch.'
)
@property
def remote(self):
"""Currently checked out branch's remote counterpart"""
try:
return self.repo.remotes[self.branch.upstream.remote_name]
except (AttributeError, KeyError):
raise CrossbowError(
'Cannot determine git remote for the Arrow repository to '
'clone or push to, try to push the `{}` branch first to have '
'a remote tracking counterpart.'.format(self.branch.name)
)
@property
def remote_url(self):
"""Currently checked out branch's remote counterpart URL
If an SSH github url is set, it will be replaced by the https
equivalent usable with GitHub OAuth token.
"""
return self._remote_url or _git_ssh_to_https(self.remote.url)
@property
def user_name(self):
try:
return next(self.repo.config.get_multivar('user.name'))
except StopIteration:
return os.environ.get('GIT_COMMITTER_NAME', 'unknown')
@property
def user_email(self):
try:
return next(self.repo.config.get_multivar('user.email'))
except StopIteration:
return os.environ.get('GIT_COMMITTER_EMAIL', 'unknown')
@property
def signature(self):
return pygit2.Signature(self.user_name, self.user_email,
int(time.time()))
@property
def default_branch_name(self):
default_branch_name = os.getenv("ARCHERY_DEFAULT_BRANCH")
if default_branch_name is None:
try:
ref_obj = self.repo.references["refs/remotes/origin/HEAD"]
target_name = ref_obj.target
target_name_tokenized = target_name.split("/")
default_branch_name = target_name_tokenized[-1]
except KeyError:
# TODO: ARROW-18011 to track changing the hard coded default
# value from "master" to "main".
default_branch_name = "master"
warnings.warn('Unable to determine default branch name: '
'ARCHERY_DEFAULT_BRANCH environment variable is '
'not set. Git repository does not contain a '
                              '\'refs/remotes/origin/HEAD\' reference. Setting '
'the default branch name to ' +
default_branch_name, RuntimeWarning)
return default_branch_name
def create_tree(self, files):
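        # `files` is a nested mapping as produced by _unflatten_tree, e.g.
        # (hypothetical content): {'ci': {'build.yml': '...'}, 'job.yml': '...'}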
builder = self.repo.TreeBuilder()
for filename, content in files.items():
if isinstance(content, dict):
# create a subtree
tree_id = self.create_tree(content)
builder.insert(filename, tree_id, pygit2.GIT_FILEMODE_TREE)
else:
# create a file
blob_id = self.repo.create_blob(content)
builder.insert(filename, blob_id, pygit2.GIT_FILEMODE_BLOB)
tree_id = builder.write()
return tree_id
def create_commit(self, files, parents=None, message='',
reference_name=None):
if parents is None:
# by default use the main branch as the base of the new branch
# required to reuse github actions cache across crossbow tasks
commit, _ = self.repo.resolve_refish(self.default_branch_name)
parents = [commit.id]
tree_id = self.create_tree(files)
author = committer = self.signature
commit_id = self.repo.create_commit(reference_name, author, committer,
message, tree_id, parents)
return self.repo[commit_id]
def create_branch(self, branch_name, files, parents=None, message='',
signature=None):
# create commit with the passed tree
commit = self.create_commit(files, parents=parents, message=message)
# create branch pointing to the previously created commit
branch = self.repo.create_branch(branch_name, commit)
# append to the pushable references
self._updated_refs.append('refs/heads/{}'.format(branch_name))
return branch
def create_tag(self, tag_name, commit_id, message=''):
tag_id = self.repo.create_tag(tag_name, commit_id,
pygit2.GIT_OBJ_COMMIT, self.signature,
message)
# append to the pushable references
self._updated_refs.append('refs/tags/{}'.format(tag_name))
return self.repo[tag_id]
def file_contents(self, commit_id, file):
commit = self.repo[commit_id]
entry = commit.tree[file]
blob = self.repo[entry.id]
return blob.data
def as_github_repo(self, github_token=None):
"""Converts it to a repository object which wraps the GitHub API"""
if self._github_repo is None:
if not _have_github3:
raise ImportError('Must install github3.py')
github_token = github_token or self.github_token
username, reponame = _parse_github_user_repo(self.remote_url)
session = github3.session.GitHubSession(
default_connect_timeout=10,
default_read_timeout=30
)
github = github3.GitHub(session=session)
github.login(token=github_token)
self._github_repo = github.repository(username, reponame)
return self._github_repo
def github_commit(self, sha):
repo = self.as_github_repo()
return repo.commit(sha)
def github_release(self, tag):
repo = self.as_github_repo()
try:
return repo.release_from_tag(tag)
except github3.exceptions.NotFoundError:
return None
def github_upload_asset_requests(self, release, path, name, mime,
max_retries=None, retry_backoff=None):
if max_retries is None:
max_retries = int(os.environ.get('CROSSBOW_MAX_RETRIES', 8))
if retry_backoff is None:
retry_backoff = int(os.environ.get('CROSSBOW_RETRY_BACKOFF', 5))
for i in range(max_retries):
try:
with open(path, 'rb') as fp:
result = release.upload_asset(name=name, asset=fp,
content_type=mime)
except github3.exceptions.ResponseError as e:
logger.error('Attempt {} has failed with message: {}.'
.format(i + 1, str(e)))
logger.error('Error message {}'.format(e.msg))
logger.error('List of errors provided by Github:')
for err in e.errors:
logger.error(' - {}'.format(err))
if e.code == 422:
# 422 Validation Failed, probably raised because
# ReleaseAsset already exists, so try to remove it before
# reattempting the asset upload
for asset in release.assets():
if asset.name == name:
logger.info('Release asset {} already exists, '
'removing it...'.format(name))
asset.delete()
logger.info('Asset {} removed.'.format(name))
break
except github3.exceptions.ConnectionError as e:
logger.error('Attempt {} has failed with message: {}.'
.format(i + 1, str(e)))
else:
logger.info('Attempt {} has finished.'.format(i + 1))
return result
time.sleep(retry_backoff)
raise RuntimeError('Github asset uploading has failed!')
def github_upload_asset_curl(self, release, path, name, mime):
upload_url, _ = release.upload_url.split('{?')
upload_url += '?name={}'.format(name)
command = [
'curl',
'--fail',
'-H', "Authorization: token {}".format(self.github_token),
'-H', "Content-Type: {}".format(mime),
'--data-binary', '@{}'.format(path),
upload_url
]
return subprocess.run(command, shell=False, check=True)
def github_overwrite_release_assets(self, tag_name, target_commitish,
patterns, method='requests'):
        # Since github has changed something, asset uploading via requests
        # became unstable, so prefer the cURL alternative.
# Potential cause:
# sigmavirus24/github3.py/issues/779#issuecomment-379470626
repo = self.as_github_repo()
if not tag_name:
raise CrossbowError('Empty tag name')
if not target_commitish:
raise CrossbowError('Empty target commit for the release tag')
# remove the whole release if it already exists
try:
release = repo.release_from_tag(tag_name)
except github3.exceptions.NotFoundError:
pass
else:
release.delete()
release = repo.create_release(tag_name, target_commitish)
for pattern in patterns:
for path in glob.glob(pattern, recursive=True):
name = os.path.basename(path)
size = os.path.getsize(path)
mime = mimetypes.guess_type(name)[0] or 'application/zip'
logger.info(
'Uploading asset `{}` with mimetype {} and size {}...'
.format(name, mime, size)
)
if method == 'requests':
self.github_upload_asset_requests(release, path, name=name,
mime=mime)
elif method == 'curl':
self.github_upload_asset_curl(release, path, name=name,
mime=mime)
else:
raise CrossbowError(
'Unsupported upload method {}'.format(method)
)
def github_pr(self, title, head=None, base=None, body=None,
github_token=None, create=False):
if create:
# Default value for base is the default_branch_name
base = self.default_branch_name if base is None else base
github_token = github_token or self.github_token
repo = self.as_github_repo(github_token=github_token)
if create:
return repo.create_pull(title=title, base=base, head=head,
body=body)
else:
# Retrieve open PR for base and head.
# There should be a single open one with that title.
for pull in repo.pull_requests(state="open", head=head,
base=base):
if title in pull.title:
return pull
raise CrossbowError(
f"Pull request with Title: {title!r} not found "
f"in repository {repo.full_name!r}"
)
class Queue(Repo):
def _latest_prefix_id(self, prefix):
pattern = re.compile(r'[\w\/-]*{}-(\d+)'.format(prefix))
matches = list(filter(None, map(pattern.match, self.repo.branches)))
if matches:
latest = max(int(m.group(1)) for m in matches)
else:
latest = -1
return latest
def _prefix_contains_date(self, prefix):
prefix_date_pattern = re.compile(r'[\w\/-]*-(\d+)-(\d+)-(\d+)')
match_prefix = prefix_date_pattern.match(prefix)
if match_prefix:
return match_prefix.group(0)[-10:]
def _latest_prefix_date(self, prefix):
pattern = re.compile(r'[\w\/-]*{}-(\d+)-(\d+)-(\d+)'.format(prefix))
matches = list(filter(None, map(pattern.match, self.repo.branches)))
if matches:
latest = sorted([m.group(0) for m in matches])[-1]
# slice the trailing date part (YYYY-MM-DD)
latest = latest[-10:]
else:
latest = -1
return latest
def _next_job_id(self, prefix):
"""Auto increments the branch's identifier based on the prefix"""
latest_id = self._latest_prefix_id(prefix)
return '{}-{}'.format(prefix, latest_id + 1)
def _new_hex_id(self, prefix):
"""Append a new id to branch's identifier based on the prefix"""
hex_id = uuid.uuid4().hex[:10]
return '{}-{}'.format(prefix, hex_id)
def latest_for_prefix(self, prefix):
prefix_date = self._prefix_contains_date(prefix)
if prefix.startswith("nightly") and not prefix_date:
latest_id = self._latest_prefix_date(prefix)
            if latest_id == -1:
raise RuntimeError(
f"No job has been submitted with prefix '{prefix}'' yet"
)
latest_id += "-0"
else:
latest_id = self._latest_prefix_id(prefix)
if latest_id < 0:
raise RuntimeError(
f"No job has been submitted with prefix '{prefix}' yet"
)
job_name = '{}-{}'.format(prefix, latest_id)
return self.get(job_name)
def date_of(self, job):
        # it'd be better to bind to the queue repository on deserialization
        # and move these methods onto Job
branch_name = 'origin/{}'.format(job.branch)
branch = self.repo.branches[branch_name]
commit = self.repo[branch.target]
return date.fromtimestamp(commit.commit_time)
def jobs(self, pattern):
"""Return jobs sorted by its identifier in reverse order"""
job_names = []
for name in self.repo.branches.remote:
origin, name = name.split('/', 1)
result = re.match(pattern, name)
if result:
job_names.append(name)
for name in sorted(job_names, reverse=True):
yield self.get(name)
def get(self, job_name):
branch_name = 'origin/{}'.format(job_name)
branch = self.repo.branches[branch_name]
try:
content = self.file_contents(branch.target, 'job.yml')
except KeyError:
raise CrossbowError(
'No job is found with name: {}'.format(job_name)
)
buffer = StringIO(content.decode('utf-8'))
job = yaml.load(buffer)
job.queue = self
return job
def put(self, job, prefix='build', increment_job_id=True):
if not isinstance(job, Job):
raise CrossbowError('`job` must be an instance of Job')
if job.branch is not None:
raise CrossbowError('`job.branch` is automatically generated, '
'thus it must be blank')
job.queue = self
if increment_job_id:
# auto increment and set next job id, e.g. build-85
job.branch = self._next_job_id(prefix)
else:
# set new branch to something unique, e.g. build-41d017af40
job.branch = self._new_hex_id(prefix)
# create tasks' branches
for task_name, task in job.tasks.items():
# adding CI's name to the end of the branch in order to use skip
# patterns on travis and circleci
task.branch = '{}-{}-{}'.format(job.branch, task.ci, task_name)
params = {
**job.params,
"arrow": job.target,
"job": job,
"queue_remote_url": self.remote_url
}
files = task.render_files(job.template_searchpath, params=params)
branch = self.create_branch(task.branch, files=files)
self.create_tag(task.tag, branch.target)
task.commit = str(branch.target)
# create job's branch with its description
return self.create_branch(job.branch, files=job.render_files())
def get_version(root, **kwargs):
"""
Parse function for setuptools_scm that ignores tags for non-C++
subprojects, e.g. apache-arrow-js-XXX tags.
"""
from setuptools_scm.git import parse as parse_git_version
# query the calculated version based on the git tags
kwargs['describe_command'] = (
'git describe --dirty --tags --long --match "apache-arrow-[0-9]*.*"'
)
version = parse_git_version(root, **kwargs)
tag = str(version.tag)
# We may get a development tag for the next version, such as "5.0.0.dev0",
# or the tag of an already released version, such as "4.0.0".
# In the latter case, we need to increment the version so that the computed
# version comes after any patch release (the next feature version after
# 4.0.0 is 5.0.0).
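    # For illustration (hypothetical tag/distance values): a parsed tag of
    # '4.0.0' with 10 commits since the tag yields '5.0.0.dev10', while a
    # parsed tag of '5.0.0.dev0' yields '5.0.0.dev10' without incrementing.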
pattern = r"^(\d+)\.(\d+)\.(\d+)"
match = re.match(pattern, tag)
major, minor, patch = map(int, match.groups())
if 'dev' not in tag:
major += 1
return "{}.{}.{}.dev{}".format(major, minor, patch, version.distance or 0)
class Serializable:
@classmethod
def to_yaml(cls, representer, data):
tag = '!{}'.format(cls.__name__)
dct = {k: v for k, v in data.__dict__.items() if not k.startswith('_')}
return representer.represent_mapping(tag, dct)
class Target(Serializable):
"""
Describes target repository and revision the builds run against
This serializable data container holding information about arrow's
git remote, branch, sha and version number as well as some metadata
(currently only an email address where the notification should be sent).
"""
def __init__(self, head, branch, remote, version, r_version, email=None):
self.head = head
self.email = email
self.branch = branch
self.remote = remote
self.github_repo = "/".join(_parse_github_user_repo(remote))
self.version = version
self.r_version = r_version
self.no_rc_version = re.sub(r'-rc\d+\Z', '', version)
self.no_rc_r_version = re.sub(r'-rc\d+\Z', '', r_version)
# TODO(ARROW-17552): Remove "master" from default_branch after
# migration to "main".
self.default_branch = ['main', 'master']
# Semantic Versioning 1.0.0: https://semver.org/spec/v1.0.0.html
#
# > A pre-release version number MAY be denoted by appending an
# > arbitrary string immediately following the patch version and a
# > dash. The string MUST be comprised of only alphanumerics plus
# > dash [0-9A-Za-z-].
#
# Example:
#
# '0.16.1.dev10' ->
# '0.16.1-dev10'
self.no_rc_semver_version = \
re.sub(r'\.(dev\d+)\Z', r'-\1', self.no_rc_version)
# Substitute dev version for SNAPSHOT
#
# Example:
#
# '10.0.0.dev235' ->
# '10.0.0-SNAPSHOT'
self.no_rc_snapshot_version = re.sub(
r'\.(dev\d+)$', '-SNAPSHOT', self.no_rc_version)
@classmethod
def from_repo(cls, repo, head=None, branch=None, remote=None, version=None,
email=None):
"""Initialize from a repository
Optionally override detected remote, branch, head, and/or version.
"""
assert isinstance(repo, Repo)
if head is None:
head = str(repo.head.target)
if branch is None:
branch = repo.branch.branch_name
if remote is None:
remote = repo.remote_url
if version is None:
version = get_version(repo.path)
if email is None:
email = repo.user_email
version_dev_match = re.match(r".*\.dev(\d+)$", version)
if version_dev_match:
with open(f"{repo.path}/r/DESCRIPTION") as description_file:
description = description_file.read()
r_version_pattern = re.compile(r"^Version:\s*(.*)$",
re.MULTILINE)
r_version = re.findall(r_version_pattern, description)[0]
if r_version:
version_dev = int(version_dev_match[1])
# "1_0000_00_00 +" is for generating a greater version
# than YYYYMMDD. For example, 1_0000_00_01
# (version_dev == 1 case) is greater than 2022_10_16.
#
# Why do we need a greater version than YYYYMMDD? It's
# for keeping backward compatibility. We used
# MAJOR.MINOR.PATCH.YYYYMMDD as our nightly package
# version. (See also ARROW-16403). If we use "9000 +
# version_dev" here, a developer that used
# 9.0.0.20221016 can't upgrade to the later nightly
# package unless we release 10.0.0. Because 9.0.0.9234
# or something is less than 9.0.0.20221016.
r_version_dev = 1_0000_00_00 + version_dev
# version: 10.0.0.dev234
# r_version: 9.0.0.9000
# -> 9.0.0.100000234
r_version = re.sub(r"\.9000\Z", f".{r_version_dev}", r_version)
else:
r_version = version
else:
r_version = version
return cls(head=head, email=email, branch=branch, remote=remote,
version=version, r_version=r_version)
def is_default_branch(self):
# TODO(ARROW-17552): Switch the condition to "is" instead of "in"
# once "master" is removed from "default_branch".
return self.branch in self.default_branch
class Task(Serializable):
"""
Describes a build task and metadata required to render CI templates
A task is represented as a single git commit and branch containing jinja2
rendered files (currently appveyor.yml or .travis.yml configurations).
    A task can't be directly submitted to a queue; it must belong to a job.
Each task's unique identifier is its branch name, which is generated after
submitting the job to a queue.
"""
def __init__(self, name, ci, template, artifacts=None, params=None):
assert ci in {
'circle',
'travis',
'appveyor',
'azure',
'github',
'drone',
}
self.name = name
self.ci = ci
self.template = template
self.artifacts = artifacts or []
self.params = params or {}
self.branch = None # filled after adding to a queue
self.commit = None # filled after adding to a queue
self._queue = None # set by the queue object after put or get
self._status = None # status cache
self._assets = None # assets cache
def render_files(self, searchpath, params=None):
params = {**self.params, **(params or {}), "task": self}
try:
rendered = _render_jinja_template(searchpath, self.template,
params=params)
except jinja2.TemplateError as e:
raise RuntimeError(
'Failed to render template `{}` with {}: {}'.format(
self.template, e.__class__.__name__, str(e)
)
)
tree = {**_default_tree, self.filename: rendered}
return _unflatten_tree(tree)
@property
def tag(self):
return self.branch
@property
def filename(self):
config_files = {
'circle': '.circleci/config.yml',
'travis': '.travis.yml',
'appveyor': 'appveyor.yml',
'azure': 'azure-pipelines.yml',
'github': '.github/workflows/crossbow.yml',
'drone': '.drone.yml',
}
return config_files[self.ci]
def status(self, force_query=False):
_status = getattr(self, '_status', None)
if force_query or _status is None:
github_commit = self._queue.github_commit(self.commit)
self._status = TaskStatus(github_commit)
return self._status
def assets(self, force_query=False, validate_patterns=True):
_assets = getattr(self, '_assets', None)
if force_query or _assets is None:
github_release = self._queue.github_release(self.tag)
self._assets = TaskAssets(github_release,
artifact_patterns=self.artifacts,
validate_patterns=validate_patterns)
return self._assets
class TaskStatus:
"""
Combine the results from status and checks API to a single state.
Azure pipelines uses checks API which doesn't provide a combined
interface like status API does, so we need to manually combine
both the commit statuses and the commit checks coming from
different API endpoint
Status.state: error, failure, pending or success, default pending
CheckRun.status: queued, in_progress or completed, default: queued
CheckRun.conclusion: success, failure, neutral, cancelled, timed_out
or action_required, only set if
CheckRun.status == 'completed'
1. Convert CheckRun's status and conclusion to one of Status.state
2. Merge the states based on the following rules:
- failure if any of the contexts report as error or failure
- pending if there are no statuses or a context is pending
- success if the latest status for all contexts is success
error otherwise.
Parameters
----------
commit : github3.Commit
Commit to query the combined status for.
Returns
-------
TaskStatus(
combined_state='error|failure|pending|success',
github_status='original github status object',
github_check_runs='github checks associated with the commit',
total_count='number of statuses and checks'
)
"""
def __init__(self, commit):
status = commit.status()
check_runs = list(commit.check_runs())
states = [s.state for s in status.statuses]
for check in check_runs:
if check.status == 'completed':
if check.conclusion in {'success', 'failure'}:
states.append(check.conclusion)
elif check.conclusion in {'cancelled', 'timed_out',
'action_required'}:
states.append('error')
# omit `neutral` conclusion
else:
states.append('pending')
        # it could be more efficient, but the following is more descriptive
combined_state = 'error'
if len(states):
if any(state in {'error', 'failure'} for state in states):
combined_state = 'failure'
elif any(state == 'pending' for state in states):
combined_state = 'pending'
elif all(state == 'success' for state in states):
combined_state = 'success'
# show link to the actual build, some of the CI providers implement
# the statuses API others implement the checks API, so display both
build_links = [s.target_url for s in status.statuses]
build_links += [c.html_url for c in check_runs]
self.combined_state = combined_state
self.github_status = status
self.github_check_runs = check_runs
self.total_count = len(states)
self.build_links = build_links
class TaskAssets(dict):
def __init__(self, github_release, artifact_patterns,
validate_patterns=True):
        # HACK(kszucs): don't expect uploaded assets if no artifacts were
        # defined for the task, in order to spare a bit of github rate limit
if not artifact_patterns:
return
if github_release is None:
github_assets = {} # no assets have been uploaded for the task
else:
github_assets = {a.name: a for a in github_release.assets()}
if not validate_patterns:
# shortcut to avoid pattern validation and just set all artifacts
return self.update(github_assets)
for pattern in artifact_patterns:
# artifact can be a regex pattern
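            # e.g. (hypothetical names) 'pyarrow-9\.0\.0-.*\.whl' should match
            # exactly one uploaded wheel asset; zero matches marks it missing,
            # multiple matches raise a CrossbowError below.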
compiled = re.compile(f"^{pattern}$")
matches = list(
filter(None, map(compiled.match, github_assets.keys()))
)
num_matches = len(matches)
# validate artifact pattern matches single asset
if num_matches == 0:
self[pattern] = None
elif num_matches == 1:
self[pattern] = github_assets[matches[0].group(0)]
else:
raise CrossbowError(
'Only a single asset should match pattern `{}`, there are '
'multiple ones: {}'.format(pattern, ', '.join(matches))
)
def missing_patterns(self):
return [pattern for pattern, asset in self.items() if asset is None]
def uploaded_assets(self):
return [asset for asset in self.values() if asset is not None]
class Job(Serializable):
"""Describes multiple tasks against a single target repository"""
def __init__(self, target, tasks, params=None, template_searchpath=None):
if not tasks:
raise ValueError('no tasks were provided for the job')
if not all(isinstance(task, Task) for task in tasks.values()):
            raise ValueError('each value in `tasks` must be an instance of Task')
if not isinstance(target, Target):
raise ValueError('`target` must be an instance of Target')
        if params is not None and not isinstance(params, dict):
raise ValueError('`params` must be an instance of dict')
self.target = target
self.tasks = tasks
self.params = params or {} # additional parameters for the tasks
self.branch = None # filled after adding to a queue
self._queue = None # set by the queue object after put or get
if template_searchpath is None:
self._template_searchpath = ArrowSources.find().path
else:
self._template_searchpath = template_searchpath
def render_files(self):
with StringIO() as buf:
yaml.dump(self, buf)
content = buf.getvalue()
tree = {**_default_tree, "job.yml": content}
return _unflatten_tree(tree)
def render_tasks(self, params=None):
result = {}
params = {
**self.params,
"arrow": self.target,
"job": self,
**(params or {})
}
for task_name, task in self.tasks.items():
files = task.render_files(self._template_searchpath, params)
result[task_name] = files
return result
@property
def template_searchpath(self):
return self._template_searchpath
@property
def queue(self):
assert isinstance(self._queue, Queue)
return self._queue
@queue.setter
def queue(self, queue):
assert isinstance(queue, Queue)
self._queue = queue
for task in self.tasks.values():
task._queue = queue
@property
def email(self):
return os.environ.get('CROSSBOW_EMAIL', self.target.email)
@property
def date(self):
return self.queue.date_of(self)
def show(self, stream=None):
return yaml.dump(self, stream=stream)
@classmethod
def from_config(cls, config, target, tasks=None, groups=None, params=None):
"""
        Instantiate a job based on a config.
Parameters
----------
config : dict
Deserialized content of tasks.yml
target : Target
Describes target repository and revision the builds run against.
tasks : Optional[List[str]], default None
List of glob patterns for matching task names.
groups : Optional[List[str]], default None
List of exact group names matching predefined task sets in the
config.
params : Optional[Dict[str, str]], default None
Additional rendering parameters for the task templates.
Returns
-------
Job
Raises
------
Exception:
If invalid groups or tasks has been passed.
"""
task_definitions = config.select(tasks, groups=groups)
# instantiate the tasks
tasks = {}
versions = {
'version': target.version,
'no_rc_version': target.no_rc_version,
'no_rc_semver_version': target.no_rc_semver_version,
'no_rc_snapshot_version': target.no_rc_snapshot_version,
'r_version': target.r_version,
'no_rc_r_version': target.no_rc_r_version,
}
for task_name, task in task_definitions.items():
task = task.copy()
artifacts = task.pop('artifacts', None) or [] # because of yaml
artifacts = [fn.format(**versions) for fn in artifacts]
tasks[task_name] = Task(task_name, artifacts=artifacts, **task)
return cls(target=target, tasks=tasks, params=params,
template_searchpath=config.template_searchpath)
def is_finished(self):
for task in self.tasks.values():
status = task.status(force_query=True)
if status.combined_state == 'pending':
return False
return True
def wait_until_finished(self, poll_max_minutes=120,
poll_interval_minutes=10):
started_at = time.time()
while True:
if self.is_finished():
break
waited_for_minutes = (time.time() - started_at) / 60
if waited_for_minutes > poll_max_minutes:
msg = ('Exceeded the maximum amount of time waiting for job '
'to finish, waited for {} minutes.')
raise RuntimeError(msg.format(waited_for_minutes))
logger.info('Waiting {} minutes and then checking again'
.format(poll_interval_minutes))
time.sleep(poll_interval_minutes * 60)
class Config(dict):
def __init__(self, tasks, template_searchpath):
super().__init__(tasks)
self.template_searchpath = template_searchpath
@classmethod
def load_yaml(cls, path):
path = Path(path)
searchpath = path.parent
rendered = _render_jinja_template(searchpath, template=path.name,
params={})
config = yaml.load(rendered)
return cls(config, template_searchpath=searchpath)
def show(self, stream=None):
return yaml.dump(dict(self), stream=stream)
def select(self, tasks=None, groups=None):
config_groups = dict(self['groups'])
config_tasks = dict(self['tasks'])
valid_groups = set(config_groups.keys())
valid_tasks = set(config_tasks.keys())
group_allowlist = list(groups or [])
task_allowlist = list(tasks or [])
# validate that the passed groups are defined in the config
requested_groups = set(group_allowlist)
invalid_groups = requested_groups - valid_groups
if invalid_groups:
msg = 'Invalid group(s) {!r}. Must be one of {!r}'.format(
invalid_groups, valid_groups
)
raise CrossbowError(msg)
# treat the task names as glob patterns to select tasks more easily
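        # e.g. (hypothetical task names) the pattern 'wheel-*' selects both
        # 'wheel-manylinux' and 'wheel-windows' if they are defined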
requested_tasks = set()
for pattern in task_allowlist:
matches = fnmatch.filter(valid_tasks, pattern)
if len(matches):
requested_tasks.update(matches)
else:
raise CrossbowError(
"Unable to match any tasks for `{}`".format(pattern)
)
requested_group_tasks = set()
for group in group_allowlist:
# separate the patterns from the blocklist patterns
task_patterns = list(config_groups[group])
task_blocklist_patterns = [
x.strip("~") for x in task_patterns if x.startswith("~")]
task_patterns = [x for x in task_patterns if not x.startswith("~")]
# treat the task names as glob patterns to select tasks more easily
for pattern in task_patterns:
matches = fnmatch.filter(valid_tasks, pattern)
if len(matches):
requested_group_tasks.update(matches)
else:
raise CrossbowError(
"Unable to match any tasks for `{}`".format(pattern)
)
# remove any tasks that are negated with ~task-name
for block_pattern in task_blocklist_patterns:
matches = fnmatch.filter(valid_tasks, block_pattern)
if len(matches):
requested_group_tasks = requested_group_tasks.difference(
matches)
else:
raise CrossbowError(
"Unable to match any tasks for `{}`".format(pattern)
)
requested_tasks = requested_tasks.union(requested_group_tasks)
# validate that the passed and matched tasks are defined in the config
invalid_tasks = requested_tasks - valid_tasks
if invalid_tasks:
msg = 'Invalid task(s) {!r}. Must be one of {!r}'.format(
invalid_tasks, valid_tasks
)
raise CrossbowError(msg)
return {
task_name: config_tasks[task_name] for task_name in requested_tasks
}
def validate(self):
        # validate that the task groups are properly referring to the tasks
for group_name, group in self['groups'].items():
for pattern in group:
# remove the negation character for blocklisted tasks
pattern = pattern.strip("~")
tasks = self.select(tasks=[pattern])
if not tasks:
raise CrossbowError(
"The pattern `{}` defined for task group `{}` is not "
"matching any of the tasks defined in the "
"configuration file.".format(pattern, group_name)
)
# validate that the tasks are constructible
for task_name, task in self['tasks'].items():
try:
Task(task_name, **task)
except Exception as e:
raise CrossbowError(
'Unable to construct a task object from the '
'definition of task `{}`. The original error message '
'is: `{}`'.format(task_name, str(e))
)
# Get the default branch name from the repository
arrow_source_dir = ArrowSources.find()
repo = Repo(arrow_source_dir.path)
        # validate that the defined tasks are renderable; in order to do that,
        # define the required objects with dummy data
target = Target(
head='e279a7e06e61c14868ca7d71dea795420aea6539',
branch=repo.default_branch_name,
remote='https://github.com/apache/arrow',
version='1.0.0dev123',
r_version='0.13.0.100000123',
email='dummy@example.ltd'
)
job = Job.from_config(config=self,
target=target,
tasks=self['tasks'],
groups=self['groups'],
params={})
for task_name, task in self['tasks'].items():
task = Task(task_name, **task)
files = task.render_files(
self.template_searchpath,
params=dict(
arrow=target,
job=job,
queue_remote_url='https://github.com/org/crossbow'
)
)
if not files:
raise CrossbowError('No files have been rendered for task `{}`'
.format(task_name))
# configure yaml serializer
yaml = YAML()
yaml.register_class(Job)
yaml.register_class(Task)
yaml.register_class(Target)
yaml.register_class(Queue)
yaml.register_class(TaskStatus)
|
{
"content_hash": "ff30ae5228e2bb8e742e983e3687483b",
"timestamp": "",
"source": "github",
"line_count": 1343,
"max_line_length": 79,
"avg_line_length": 36.63440059568131,
"alnum_prop": 0.5610772357723577,
"repo_name": "apache/arrow",
"id": "d5ce6143136ae7b27add254edf96348723aade50",
"size": "49986",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dev/archery/archery/crossbow/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "3709"
},
{
"name": "Batchfile",
"bytes": "30689"
},
{
"name": "C",
"bytes": "1400442"
},
{
"name": "C#",
"bytes": "1029129"
},
{
"name": "C++",
"bytes": "24661612"
},
{
"name": "CMake",
"bytes": "709915"
},
{
"name": "Cython",
"bytes": "1554440"
},
{
"name": "Dockerfile",
"bytes": "147322"
},
{
"name": "Emacs Lisp",
"bytes": "1064"
},
{
"name": "FreeMarker",
"bytes": "2312"
},
{
"name": "Go",
"bytes": "4586449"
},
{
"name": "HTML",
"bytes": "3430"
},
{
"name": "Java",
"bytes": "7045674"
},
{
"name": "JavaScript",
"bytes": "127157"
},
{
"name": "Jinja",
"bytes": "19948"
},
{
"name": "Lua",
"bytes": "8771"
},
{
"name": "MATLAB",
"bytes": "40399"
},
{
"name": "Makefile",
"bytes": "32873"
},
{
"name": "Meson",
"bytes": "69508"
},
{
"name": "Objective-C++",
"bytes": "11472"
},
{
"name": "Perl",
"bytes": "3803"
},
{
"name": "Python",
"bytes": "3055602"
},
{
"name": "R",
"bytes": "1561613"
},
{
"name": "Ruby",
"bytes": "1615226"
},
{
"name": "Shell",
"bytes": "389942"
},
{
"name": "Thrift",
"bytes": "34246"
},
{
"name": "TypeScript",
"bytes": "1075563"
},
{
"name": "Vala",
"bytes": "24798"
}
],
"symlink_target": ""
}
|
class TileCacheException(Exception): pass
import sys, cgi, time, os, traceback, email, ConfigParser
import Cache, Caches
import Layer, Layers
# Windows doesn't always do the 'working directory' check correctly.
cfgfiles = ("/etc/tilecache.cfg", os.path.join("..", "tilecache.cfg"), "tilecache.cfg")
class Capabilities (object):
def __init__ (self, format, data):
self.format = format
self.data = data
class Request (object):
def __init__ (self, service):
self.service = service
def getLayer(self, layername):
try:
return self.service.layers[layername]
        except KeyError:
raise TileCacheException("The requested layer (%s) does not exist. Available layers are: \n * %s" % (layername, "\n * ".join(self.service.layers.keys())))
def import_module(name):
"""Helper module to import any module based on a name, and return the module."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
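# Illustrative sketch of the helper above: import_module("TileCache.Caches.Disk")
# imports the top-level "TileCache" package, then walks getattr() down through
# "Caches" to the "Disk" submodule. For a standard-library example:
#   mod = import_module("os.path")
#   mod.join("tmp", "x")   # same function as os.path.join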
class Service (object):
__slots__ = ("layers", "cache", "metadata", "tilecache_options", "config", "files")
def __init__ (self, cache, layers, metadata = {}):
self.cache = cache
self.layers = layers
self.metadata = metadata
def _loadFromSection (cls, config, section, module, **objargs):
type = config.get(section, "type")
for opt in config.options(section):
if opt not in ["type", "module"]:
objargs[opt] = config.get(section, opt)
object_module = None
if config.has_option(section, "module"):
object_module = import_module(config.get(section, "module"))
else:
if module is Layer:
type = type.replace("Layer", "")
object_module = import_module("TileCache.Layers.%s" % type)
else:
type = type.replace("Cache", "")
object_module = import_module("TileCache.Caches.%s" % type)
        if object_module is None:
raise TileCacheException("Attempt to load %s failed." % type)
section_object = getattr(object_module, type)
if module is Layer:
return section_object(section, **objargs)
else:
return section_object(**objargs)
loadFromSection = classmethod(_loadFromSection)
def _load (cls, *files):
cache = None
metadata = {}
layers = {}
config = None
try:
config = ConfigParser.ConfigParser()
config.read(files)
if config.has_section("metadata"):
for key in config.options("metadata"):
metadata[key] = config.get("metadata", key)
if config.has_section("tilecache_options"):
if 'path' in config.options("tilecache_options"):
for path in config.get("tilecache_options", "path").split(","):
sys.path.insert(0, path)
cache = cls.loadFromSection(config, "cache", Cache)
layers = {}
for section in config.sections():
if section in cls.__slots__: continue
layers[section] = cls.loadFromSection(
config, section, Layer,
cache = cache)
except Exception, E:
metadata['exception'] = E
metadata['traceback'] = "".join(traceback.format_tb(sys.exc_traceback))
service = cls(cache, layers, metadata)
service.files = files
service.config = config
return service
load = classmethod(_load)
def generate_crossdomain_xml(self):
"""Helper method for generating the XML content for a crossdomain.xml
file, to be used to allow remote sites to access this content."""
xml = ["""<?xml version="1.0"?>
<!DOCTYPE cross-domain-policy SYSTEM
"http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">
<cross-domain-policy>
"""]
if self.metadata.has_key('crossdomain_sites'):
sites = self.metadata['crossdomain_sites'].split(',')
for site in sites:
xml.append(' <allow-access-from domain="%s" />' % site)
xml.append("</cross-domain-policy>")
return ('text/xml', "\n".join(xml))
def renderTile (self, tile, force = False):
start = time.time()
# do more cache checking here: SRS, width, height, layers
layer = tile.layer
image = None
if not force: image = self.cache.get(tile)
if not image:
data = layer.render(tile, force=force)
if (data): image = self.cache.set(tile, data)
else: raise Exception("Zero length data returned from layer.")
if layer.debug:
sys.stderr.write(
"Cache miss: %s, Tile: x: %s, y: %s, z: %s, time: %s\n" % (
tile.bbox(), tile.x, tile.y, tile.z, (time.time() - start)) )
else:
if layer.debug:
sys.stderr.write(
"Cache hit: %s, Tile: x: %s, y: %s, z: %s, time: %s, debug: %s\n" % (
tile.bbox(), tile.x, tile.y, tile.z, (time.time() - start), layer.debug) )
return (layer.mime_type, image)
def expireTile (self, tile):
bbox = tile.bounds()
layer = tile.layer
for z in range(len(layer.resolutions)):
bottomleft = layer.getClosestCell(z, bbox[0:2])
topright = layer.getClosestCell(z, bbox[2:4])
for y in range(bottomleft[1], topright[1] + 1):
for x in range(bottomleft[0], topright[0] + 1):
coverage = Layer.Tile(layer,x,y,z)
self.cache.delete(coverage)
def dispatchRequest (self, params, path_info="/", req_method="GET", host="http://example.com/"):
if self.metadata.has_key('exception'):
raise TileCacheException("%s\n%s" % (self.metadata['exception'], self.metadata['traceback']))
if path_info.find("crossdomain.xml") != -1:
return self.generate_crossdomain_xml()
if path_info.split(".")[-1] == "kml":
from TileCache.Services.KML import KML
return KML(self).parse(params, path_info, host)
if params.has_key("scale") or params.has_key("SCALE"):
from TileCache.Services.WMTS import WMTS
tile = WMTS(self).parse(params, path_info, host)
elif params.has_key("service") or params.has_key("SERVICE") or \
params.has_key("REQUEST") and params['REQUEST'] == "GetMap" or \
params.has_key("request") and params['request'] == "GetMap":
from TileCache.Services.WMS import WMS
tile = WMS(self).parse(params, path_info, host)
elif params.has_key("L") or params.has_key("l") or \
params.has_key("request") and params['request'] == "metadata":
from TileCache.Services.WorldWind import WorldWind
tile = WorldWind(self).parse(params, path_info, host)
elif params.has_key("interface"):
from TileCache.Services.TileService import TileService
tile = TileService(self).parse(params, path_info, host)
elif params.has_key("v") and \
(params['v'] == "mgm" or params['v'] == "mgmaps"):
from TileCache.Services.MGMaps import MGMaps
tile = MGMaps(self).parse(params, path_info, host)
elif params.has_key("tile"):
from TileCache.Services.VETMS import VETMS
tile = VETMS(self).parse(params, path_info, host)
elif params.has_key("format") and params['format'].lower() == "json":
from TileCache.Services.JSON import JSON
return JSON(self).parse(params, path_info, host)
else:
from TileCache.Services.TMS import TMS
tile = TMS(self).parse(params, path_info, host)
if isinstance(tile, Layer.Tile):
if req_method == 'DELETE':
self.expireTile(tile)
return ('text/plain', 'OK')
else:
return self.renderTile(tile, params.has_key('FORCE'))
elif isinstance(tile, list):
if req_method == 'DELETE':
[self.expireTile(t) for t in tile]
return ('text/plain', 'OK')
else:
try:
import PIL.Image as Image
except ImportError:
raise Exception("Combining multiple layers requires Python Imaging Library.")
try:
import cStringIO as StringIO
except ImportError:
import StringIO
result = None
for t in tile:
(format, data) = self.renderTile(t, params.has_key('FORCE'))
image = Image.open(StringIO.StringIO(data))
if not result:
result = image
else:
try:
result.paste(image, None, image)
except Exception, E:
raise Exception("Could not combine images: Is it possible that some layers are not \n8-bit transparent images? \n(Error was: %s)" % E)
buffer = StringIO.StringIO()
result.save(buffer, result.format)
                buffer.seek(0)
                return (format, buffer.read())
        else:
            return (tile.format, tile.data)
def modPythonHandler (apacheReq, service):
from mod_python import apache, util
try:
if apacheReq.headers_in.has_key("X-Forwarded-Host"):
host = "http://" + apacheReq.headers_in["X-Forwarded-Host"]
else:
host = "http://" + apacheReq.headers_in["Host"]
host += apacheReq.uri[:-len(apacheReq.path_info)]
format, image = service.dispatchRequest(
util.FieldStorage(apacheReq),
apacheReq.path_info,
apacheReq.method,
host )
apacheReq.content_type = format
apacheReq.status = apache.HTTP_OK
# Hack start. Expire added to layers
if format.startswith("image/"):
apacheReq.add_common_vars()
env_vars = apacheReq.subprocess_env
getReqStr = env_vars['QUERY_STRING']
getReqArr = getReqStr.split('&')
getReqDict = {}
for item in getReqArr:
if item:
tempArr = item.split('=')
getReqDict[tempArr[0]] = tempArr[1]
fields = getReqDict
            if service.cache.sendfile:
                apacheReq.headers_out['X-SendFile'] = image
layer_expire = None
if fields.has_key('layers') or fields.has_key('LAYERS'):
layers = fields.get('layers', fields.get('LAYERS'))
# single layers only
if not ',' in layers:
layer = service.layers[layers]
if layer.expire:
layer_expire = long(layer.expire)
apacheReq.headers_out['Expires'] = email.Utils.formatdate(time.time() + layer_expire, False, True)
if service.cache.expire and not layer_expire:
apacheReq.headers_out['Expires'] = email.Utils.formatdate(time.time() + service.cache.expire, False, True)
# Hack end
apacheReq.set_content_length(len(image))
apacheReq.send_http_header()
if format.startswith("image/") and service.cache.sendfile:
apacheReq.write("")
else:
apacheReq.write(image)
except TileCacheException, E:
apacheReq.content_type = "text/plain"
apacheReq.status = apache.HTTP_NOT_FOUND
apacheReq.send_http_header()
apacheReq.write("An error occurred: %s\n" % (str(E)))
except Exception, E:
apacheReq.content_type = "text/plain"
apacheReq.status = apache.HTTP_INTERNAL_SERVER_ERROR
apacheReq.send_http_header()
apacheReq.write("An error occurred: %s\n%s\n" % (
str(E),
"".join(traceback.format_tb(sys.exc_traceback))))
return apache.OK
def wsgiHandler (environ, start_response, service):
from paste.request import parse_formvars
try:
path_info = host = ""
if "PATH_INFO" in environ:
path_info = environ["PATH_INFO"]
if "HTTP_X_FORWARDED_HOST" in environ:
host = "http://" + environ["HTTP_X_FORWARDED_HOST"]
elif "HTTP_HOST" in environ:
host = "http://" + environ["HTTP_HOST"]
host += environ["SCRIPT_NAME"]
req_method = environ["REQUEST_METHOD"]
fields = parse_formvars(environ)
format, image = service.dispatchRequest( fields, path_info, req_method, host )
headers = [('Content-Type',format)]
if format.startswith("image/"):
if service.cache.sendfile:
headers.append(('X-SendFile', image))
if service.cache.expire:
headers.append(('Expires', email.Utils.formatdate(time.time() + service.cache.expire, False, True)))
# Hack start. Expire added to layers
if format.startswith("image/"):
if service.cache.sendfile:
headers.append(('X-SendFile', image))
layer_expire = None
if fields.has_key('layers') or fields.has_key('LAYERS'):
layers = fields.get('layers', fields.get('LAYERS'))
# single layers only
if not ',' in layers:
layer = service.layers[layers]
if layer.expire:
layer_expire = long(layer.expire)
headers.append(('Expires', email.Utils.formatdate(time.time() + layer_expire, False, True)))
if service.cache.expire and not layer_expire:
headers.append(('Expires', email.Utils.formatdate(time.time() + service.cache.expire, False, True)))
# Hack end
start_response("200 OK", headers)
if service.cache.sendfile and format.startswith("image/"):
return []
else:
return [image]
except TileCacheException, E:
start_response("404 Tile Not Found"+str(E), [('Content-Type','text/plain')])
return ["An error occurred: %s" % (str(E))]
except Exception, E:
start_response("500 Internal Server Error", [('Content-Type','text/plain')])
return ["An error occurred: %s\n%s\n" % (
str(E),
"".join(traceback.format_tb(sys.exc_traceback)))]
def cgiHandler (service):
try:
params = {}
input = cgi.FieldStorage()
for key in input.keys(): params[key] = input[key].value
path_info = host = ""
if "PATH_INFO" in os.environ:
path_info = os.environ["PATH_INFO"]
if "HTTP_X_FORWARDED_HOST" in os.environ:
host = "http://" + os.environ["HTTP_X_FORWARDED_HOST"]
elif "HTTP_HOST" in os.environ:
host = "http://" + os.environ["HTTP_HOST"]
host += os.environ["SCRIPT_NAME"]
req_method = os.environ["REQUEST_METHOD"]
format, image = service.dispatchRequest( params, path_info, req_method, host )
print "Content-type: %s" % format
if format.startswith("image/"):
if service.cache.sendfile:
print "X-SendFile: %s" % image
if service.cache.expire:
print "Expires: %s" % email.Utils.formatdate(time.time() + service.cache.expire, False, True)
print ""
if (not service.cache.sendfile) or (not format.startswith("image/")):
if sys.platform == "win32":
binaryPrint(image)
else:
print image
except TileCacheException, E:
print "Cache-Control: max-age=10, must-revalidate" # make the client reload
print "Content-type: text/plain\n"
print "An error occurred: %s\n" % (str(E))
except Exception, E:
print "Cache-Control: max-age=10, must-revalidate" # make the client reload
print "Content-type: text/plain\n"
print "An error occurred: %s\n%s\n" % (
str(E),
"".join(traceback.format_tb(sys.exc_traceback)))
theService = {}
lastRead = {}
def handler (apacheReq):
global theService, lastRead
options = apacheReq.get_options()
apacheReq.add_common_vars()
env_vars = apacheReq.subprocess_env
getReqStr = env_vars['QUERY_STRING']
getReqArr = getReqStr.split('&')
getReqDict = {}
for item in getReqArr:
if item:
tempArr = item.split('=')
getReqDict[tempArr[0]] = tempArr[1]
myGeoCloudDB = getReqDict['cfg']
cfgs = cfgfiles
fileChanged = False
configFile = os.path.dirname(__file__) + '/../../../app/wms/cfgfiles/' +myGeoCloudDB +'.tilecache.cfg'
cfgs = cfgs + (configFile,)
try:
cfgTime = os.stat(configFile)[8]
fileChanged = lastRead[configFile] < cfgTime
sys.stderr.write("lastRead %s\n" % lastRead[configFile])
sys.stderr.write("cfgTime %s\n" % cfgTime)
sys.stderr.write("fileChanged %s\n" % fileChanged)
sys.stderr.write("\n")
except:
pass
if not theService.has_key(configFile) or fileChanged:
lastRead[configFile] = time.time()
theService[configFile] = Service.load(*cfgs)
sys.stderr.write("\n")
sys.stderr.write("Load\n")
sys.stderr.write("\n")
return modPythonHandler(apacheReq, theService[configFile])
def wsgiApp (environ, start_response):
global theService, myGeoCloudDB
form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
myGeoCloudDB = form['cfg'].value
if 'temp' not in locals():
temp = myGeoCloudDB
sys.stderr.write("Temp="+temp+"\n")
sys.stderr.write(myGeoCloudDB+"\n")
cfgfiles = ("","../../app/wms/cfgfiles/" + myGeoCloudDB + ".tilecache.cfg")
cfgs = cfgfiles
if temp != myGeoCloudDB or not theService: # We have to reload the cfg cos the different cfg files
temp = myGeoCloudDB
theService = Service.load(*cfgs)
return wsgiHandler(environ, start_response, theService)
def binaryPrint(binary_data):
"""This function is designed to work around the fact that Python
in Windows does not handle binary output correctly. This function
will set the output to binary, and then write to stdout directly
rather than using print."""
try:
import msvcrt
msvcrt.setmode(sys.__stdout__.fileno(), os.O_BINARY)
except:
pass
sys.stdout.write(binary_data)
def paste_deploy_app(global_conf, full_stack=True, **app_conf):
if 'tilecache_config' in app_conf:
cfgfiles = (app_conf['tilecache_config'],)
else:
raise TileCacheException("No tilecache_config key found in configuration. Please specify location of tilecache config file in your ini file.")
theService = Service.load(*cfgfiles)
if 'exception' in theService.metadata:
raise theService.metadata['exception']
def pdWsgiApp (environ,start_response):
return wsgiHandler(environ,start_response,theService)
return pdWsgiApp
if __name__ == '__main__':
svc = Service.load(*cfgfiles)
cgiHandler(svc)
|
{
"content_hash": "f8e538388d00a350f7e544762e5c06ce",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 166,
"avg_line_length": 40.91078838174274,
"alnum_prop": 0.5668644454586946,
"repo_name": "pcucurullo/groot",
"id": "37958285e604a371558d649b1275060e1e989925",
"size": "19802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "public/cgi/TileCache/Service.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "2870"
},
{
"name": "C",
"bytes": "7006"
},
{
"name": "CSS",
"bytes": "7538133"
},
{
"name": "HTML",
"bytes": "49958485"
},
{
"name": "JavaScript",
"bytes": "60060170"
},
{
"name": "Makefile",
"bytes": "2100"
},
{
"name": "PHP",
"bytes": "4381269"
},
{
"name": "PLpgSQL",
"bytes": "30016"
},
{
"name": "Python",
"bytes": "344097"
},
{
"name": "SQLPL",
"bytes": "17423"
},
{
"name": "Shell",
"bytes": "15692"
},
{
"name": "XSLT",
"bytes": "2204"
}
],
"symlink_target": ""
}
|
import copy
from pony.orm import *
from pony import orm
from typing import Optional
from horsefax.telegram.services.command import Command
from ..core import HorseFaxBot, ModuleTools, BaseModule
from ..db import db
class Alias(db.Entity):
id = PrimaryKey(int, auto=True)
alias = Required(str, unique=True)
command = Required(str)
added_by = orm.Optional(int)
class AliasModule(BaseModule):
@db_session
def __init__(self, bot: HorseFaxBot, util: ModuleTools) -> None:
self.bot = bot
self.util = util
self.util.register_command('addalias', self.add_alias)
self.util.register_command('removealias', self.remove_alias)
for alias in Alias.select():
self.util.register_command(alias.alias, self.handle_alias)
@db_session
def add_alias(self, command: Command) -> Optional[str]:
if len(command.args) < 2:
return "Syntax: `/addalias <alias> <command>`"
alias = command.args[0]
alias_to = ' '.join(command.args[1:])
try:
Alias(alias=alias, command=alias_to, added_by=command.message.sender.id)
except IntegrityError:
return "That alias already exists."
self.util.register_command(alias, self.handle_alias)
return f"Added alias /{alias}."
@db_session
def handle_alias(self, command: Command) -> Optional[str]:
alias = Alias.get(alias=command.command)
if not hasattr(command.message, 'alias_origin'):
command.message.alias_origin = []
if command.command in command.message.alias_origin:
return f"Detected command loop: `{' -> '.join(command.message.alias_origin)} -> {command.command}`"
command.message.alias_origin.append(command.command)
new_message = copy.copy(command.message)
new_message.text = "/" + alias.command
if len(command.args) > 0:
new_message.text += ' ' + ' '.join(command.args)
self.bot.commands.handle_message(new_message)
return None
@db_session
def remove_alias(self, command: Command) -> str:
if len(command.args) < 1:
return "Syntax: `/removealias alias`"
alias_name = command.args[0]
alias = Alias.get(alias=alias_name)
if alias is None:
return f"Alias `{alias_name}` does not exist."
alias.delete()
return f"Deleted `/{alias_name}` (which was an alias for `/{alias.command}`)."
|
{
"content_hash": "5bcb47db23991fd94fe43c3a4c7a7fce",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 111,
"avg_line_length": 36.8955223880597,
"alnum_prop": 0.6302588996763754,
"repo_name": "TallonRain/horsefaxbot",
"id": "85898a5ca3d9176f41d5961ea1d15cc814119e91",
"size": "2472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "horsefax/bot/modules/aliases.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54475"
}
],
"symlink_target": ""
}
|
"""
Van der Grinten
===============
The Van der Grinten projection, presented by Alphons J. van der Grinten in 1904, is
neither equal-area nor conformal. Central meridian and Equator are straight lines;
other meridians are arcs of circles. The scale is true along the Equator only. Its
main use is to show the entire world enclosed in a circle.
**v**\ [*lon0/*]\ *scale* or **V**\ [*lon0/*]\ *width*
The projection is set with **v** or **V**. The central meridian is set with the
optional *lon0*, and the figure size is set with *scale* or *width*.
"""
import pygmt
fig = pygmt.Figure()
# Use region "d" to specify global region (-180/180/-90/90)
fig.coast(region="d", projection="V12c", land="gray", water="cornsilk", frame="afg")
fig.show()
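# The central meridian can be given before the width; an illustrative variant
# (not part of the gallery example above) centered on 75 degrees East:
#   fig.coast(region="d", projection="V75/12c", land="gray",
#             water="cornsilk", frame="afg")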
|
{
"content_hash": "0ed356c86ab11b1a1b72c2e929811e29",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 84,
"avg_line_length": 37.35,
"alnum_prop": 0.6934404283801874,
"repo_name": "GenericMappingTools/gmt-python",
"id": "a297b950c396524e6e23fd78c4a8574c2ae32ff9",
"size": "747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/projections/misc/misc_van_der_grinten.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1357"
},
{
"name": "Python",
"bytes": "292740"
},
{
"name": "Shell",
"bytes": "357"
}
],
"symlink_target": ""
}
|
import os
import logging
help = 'Import mcu to pgen. Provide a valid project file; pgen will parse it to create the mcu definition'
from ..tools_supported import ToolsSupported
from ..targets import mcu_create
def run(args):
root = os.getcwd()
tool = ToolsSupported().get_tool(args.tool)
if tool is None:
return -1
return mcu_create(tool, args.mcu, args.file, args.tool)
def setup(subparser):
subparser.add_argument(
'-mcu', action='store', required = True, help='MCU name')
# we need tool as some tools have same extensions and we might have problems
subparser.add_argument(
'-t', '--tool', action='store', required = True, help='Tool to be set')
subparser.add_argument(
'-f', '--file', action='store', required = True, help='Project file to be parsed (a valid tool project)')
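# Example invocation (a sketch; the MCU, tool, and project file names are
# hypothetical):
#   progen import -mcu lpc1768 -t uvision -f my_project.uvproj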
|
{
"content_hash": "25012a7773647f14ca096cf2db6cacb9",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 113,
"avg_line_length": 35,
"alnum_prop": 0.6738095238095239,
"repo_name": "molejar/project_generator",
"id": "4ed07cc55410520b2220a220c3d9e0b8eccc80a3",
"size": "1412",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "project_generator/commands/import_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "237307"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import get_user_model
from decimal import Decimal
from questions.models import UserAnswer, Question
from django.db.models import Q
# User = get_user_model()
# users = User.objects.all() #[user1 ,user2]
# all_user_answers = UserAnswer.objects.all().order_by("user_id")
def get_match(user_a, user_b):
q1 = Q(useranswer__user=user_a)
q2 = Q(useranswer__user=user_b)
# question_set = Question.objects.filter(q1 | q2).distinct()
question_set1 = Question.objects.filter(q1)
question_set2 = Question.objects.filter(q2)
if question_set1.count() == 0:
return 0.0, 0
if question_set2.count() == 0:
return 0.0, 0
question_set = (question_set1 | question_set2).distinct()
a_points = 0
b_points = 0
    a_total_points = 0.000001  # small epsilon to avoid division by zero
    b_total_points = 0.000001
questions_in_common = 0
for question in question_set:
        try:
            a = UserAnswer.objects.get(user=user_a, question=question)
        except UserAnswer.DoesNotExist:
            a = None
        try:
            b = UserAnswer.objects.get(user=user_b, question=question)
        except UserAnswer.DoesNotExist:
            b = None
if a and b:
questions_in_common += 1
if a.their_answer == b.my_answer:
b_points += a.their_points
b_total_points += a.their_points
if b.their_answer == a.my_answer:
a_points += b.their_points
a_total_points += b.their_points
if questions_in_common > 0:
a_decimal = a_points / Decimal(a_total_points)
b_decimal = b_points / Decimal(b_total_points)
print b_decimal, a_decimal
if a_decimal == 0:
a_decimal = 0.000001
if b_decimal == 0:
b_decimal = 0.000001
match_percentage = (Decimal(a_decimal) * Decimal(b_decimal)) ** (1 / Decimal(questions_in_common))
return match_percentage, questions_in_common
else:
return 0.0, 0
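# Worked example (illustrative numbers): with questions_in_common = 2,
# a_decimal = 0.8 and b_decimal = 0.5, the result is
# (0.8 * 0.5) ** (1 / 2) ~= 0.632 -- the questions_in_common-th root of the
# product of the two directional scores, which penalises one-sided matches.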
|
{
"content_hash": "c1170e1864a2acd1471f94a99ef91e09",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 106,
"avg_line_length": 34.24561403508772,
"alnum_prop": 0.5932377049180327,
"repo_name": "hungtt57/matchmaker",
"id": "86a061e6cc8c69443826ef8772e917892a5662aa",
"size": "1952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/matches/utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "1350"
},
{
"name": "C",
"bytes": "33224"
},
{
"name": "CSS",
"bytes": "85777"
},
{
"name": "HTML",
"bytes": "150876"
},
{
"name": "JavaScript",
"bytes": "183948"
},
{
"name": "Python",
"bytes": "8566151"
},
{
"name": "Shell",
"bytes": "3310"
}
],
"symlink_target": ""
}
|
"""
Django settings for django_email_or_username project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'pq*6+ece%np5dwf&6&(p1cxzj@wbe6j@juli4=nqh1m%g54ktz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend',
'accounts.backends.EmailOrUsernameAuthBackend', ]
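# A minimal sketch of such a backend (hypothetical; the real implementation
# lives in accounts/backends.py): authenticate() looks the user up by
# username, falls back to an email match, and verifies the password with
# user.check_password(password) before returning the user.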
ROOT_URLCONF = 'django_email_or_username.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_email_or_username.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
{
"content_hash": "2bf2d07d06d740cd77b5f3428f4e7c5b",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 77,
"avg_line_length": 26.72897196261682,
"alnum_prop": 0.6926573426573427,
"repo_name": "ScottyMJacobson/django-email-or-username",
"id": "6c8e433cc5ae7d68b144737d919628b428e003b6",
"size": "2860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_email_or_username/settings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "10251"
}
],
"symlink_target": ""
}
|
import unittest
import utils
from testbin import TestBin
class TestBinCostart(TestBin, unittest.TestCase):
def setUp(self):
self.bin = 'co-start'
self.do_not_test_running = True
def tearDown(self):
pass
|
{
"content_hash": "77e150eaebdb655ef2586ded51b78133",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 49,
"avg_line_length": 19.833333333333332,
"alnum_prop": 0.6722689075630253,
"repo_name": "compatibleone/accords-platform",
"id": "ae0ab07199d14261e32c8d2828419b361c51c294",
"size": "267",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "testsuite/basic/costart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4783261"
},
{
"name": "C++",
"bytes": "98408"
},
{
"name": "CSS",
"bytes": "11779"
},
{
"name": "Java",
"bytes": "158712"
},
{
"name": "PHP",
"bytes": "99030"
},
{
"name": "Python",
"bytes": "207999"
},
{
"name": "Shell",
"bytes": "134814"
}
],
"symlink_target": ""
}
|
import graphene
from graphene_django import DjangoObjectType
from share import models
from share.graphql.base import AbstractShareObject
def Agent():
from share.graphql.agent import AbstractAgent
return AbstractAgent
def CreativeWork():
from share.graphql.work import AbstractCreativeWork
return AbstractCreativeWork
class AbstractWorkRelation(AbstractShareObject):
subject = graphene.Field(CreativeWork)
related = graphene.Field(CreativeWork)
@graphene.resolve_only_args
def resolve_related(self):
return self.related
@graphene.resolve_only_args
def resolve_subject(self):
return self.subject
class AbstractAgentRelation(AbstractShareObject):
subject = graphene.Field(Agent)
related = graphene.Field(Agent)
@graphene.resolve_only_args
def resolve_related(self):
return self.related
@graphene.resolve_only_args
def resolve_subject(self):
return self.subject
class AbstractAgentWorkRelation(AbstractShareObject):
cited_as = graphene.String()
agent = graphene.Field(Agent)
creative_work = graphene.Field(CreativeWork)
@graphene.resolve_only_args
def resolve_agent(self):
return self.agent
for base, interface in ((models.AgentRelation, AbstractAgentRelation), (models.WorkRelation, AbstractWorkRelation), (models.AgentWorkRelation, AbstractAgentWorkRelation)):
for klass in base.get_type_classes():
locals()[klass.__name__] = type(klass.__name__, (DjangoObjectType, ), {
'Meta': type('Meta', (), {'model': klass, 'interfaces': (interface, )})
})
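# Illustrative expansion of the loop above: if base.get_type_classes() yields
# a model class named "Cites", the generated type is equivalent to writing
#
#   class Cites(DjangoObjectType):
#       class Meta:
#           model = models.Cites
#           interfaces = (AbstractWorkRelation, )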
|
{
"content_hash": "06df996d361f74fe1ace6ebae7bccaa2",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 171,
"avg_line_length": 27.440677966101696,
"alnum_prop": 0.7195799876466955,
"repo_name": "laurenbarker/SHARE",
"id": "6653168ec3c82d54e7c45fe685492b7115f064e1",
"size": "1619",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "share/graphql/relations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3786"
},
{
"name": "Gherkin",
"bytes": "1773"
},
{
"name": "HTML",
"bytes": "4849"
},
{
"name": "Python",
"bytes": "1431647"
},
{
"name": "Shell",
"bytes": "830"
}
],
"symlink_target": ""
}
|
""" Manage mentions and their attributes. """
from cort.core import mention_property_computer
from cort.core import spans
__author__ = 'smartschat'
class Mention:
""" A mention is an expression in a document which is potentially referring.
Attributes:
document (CoNLLDocument): The document the mention belongs to.
span (Span): The span of the mention in its document. If for example
the span is (3, 4), then the mention starts at the 3rd token in
the document and ends at the 4th (inclusive).
attributes (dict(str, object)): A mapping of attribute names to
attribute values. When creating a document from a text, The
following attributes are used:
- tokens (list(str)): the tokens of the mention,
- head (list(str)): the head words of the mention,
- pos (list(str)): the part-of-speech tags of the mention,
- ner (list(str)): the named entity tags of the mention,
as found in the data,
- type (str): the mention type, one of
- NAM (proper name),
- NOM (common noun),
- PRO (pronoun),
- DEM (demonstrative pronoun),
- VRB (verb),
- fine_type (str): only set when the mention is a nominal or a
pronoun, for nominals values
- DEF (definite noun phrase) or
- INDEF,
for pronouns values
- PERS_NOM (personal pronoun, nominative case),
- PERS_ACC (personal pronoun, accusative),
- REFL (reflexive pronoun),
- POSS (possessive pronoun) or
- POSS_ADJ (possessive adjective, e.g. 'his'),
- citation_form (str): only set if the mention is a pronoun,
then the canonical form of the pronoun, i.e. one of
i, you, he, she, it, we, they,
- grammatical_function (str): either SUBJECT, OBJECT or OTHER,
- number (str): either SINGULAR, PLURAL or UNKNOWN,
- gender (str): either MALE, FEMALE, NEUTRAL, PLURAL or UNKNOWN,
- semantic_class (str): either PERSON, OBJECT or UNKNOWN,
- sentence_id (int): the sentence id of the mention's sentence
(starting at 0),
- parse_tree (nltk.ParentedTree): the parse tree of the mention,
- speaker (str): the speaker of the mention,
- antecedent (Mention): the antecedent of the mention
                (initially None),
- annotated_set_id (str): the set id of the mention as found
in the data,
- set_id (str): the set id of the mention computed by a
coreference resolution approach (initially None),
- head_span (Span): the span of the head (in the document),
- head_index (int): the mention-internal index of the start of
the head,
- is_apposition (bool): whether the mention contains an
apposition,
- head_as_lowercase_string (str): the head lowercased and as a
string,
- tokens_as_lowercase_string (str): all tokens of the mention
lowercased and as a string,
- first_in_gold_entity (bool): whether the mention is the first
mention in its gold entity (for system mentions, this is
also true if no preceding mention in the same entity was
found by the mention extractor),
"""
def __init__(self, document, span, attributes):
""" Initialize a mention in a document.
Args:
document (CoNLLDocument): The document the mention belongs to.
span (Span): The span of the mention in its document.
attributes (dict(str, object)): A mapping of attribute names to
attribute values (see the class documentation for more
information).
"""
self.document = document
self.span = span
self.attributes = attributes
@staticmethod
def dummy_from_document(document):
return Mention(document, None, {
"is_dummy": True,
"annotated_set_id": None,
"tokens": [],
"first_in_gold_entity": True
})
def is_dummy(self):
return "is_dummy" in self.attributes and self.attributes["is_dummy"]
@staticmethod
def from_document(span, document, first_in_gold_entity=False):
"""
Create a mention from a span in a document.
All attributes of the mention are computed from the linguistic
information found in the document. For information about the
attributes, see the class documentation.
Args:
document (CoNLLDocument): The document the mention belongs to.
span (Span): The span of the mention in the document.
Returns:
Mention: A mention extracted from the input span in the input
document.
"""
i, sentence_span = document.get_sentence_id_and_span(span)
attributes = {
"tokens": document.tokens[span.begin:span.end + 1],
"pos": document.pos[span.begin:span.end + 1],
"ner": document.ner[span.begin:span.end + 1],
"sentence_id": i,
"parse_tree": mention_property_computer.get_relevant_subtree(
span, document),
"speaker": document.speakers[span.begin],
"antecedent": None,
"set_id": None,
"first_in_gold_entity": first_in_gold_entity
}
if span in document.coref:
attributes["annotated_set_id"] = document.coref[span]
else:
attributes["annotated_set_id"] = None
attributes["is_apposition"] = \
mention_property_computer.is_apposition(attributes)
attributes["grammatical_function"] = \
mention_property_computer.get_grammatical_function(attributes)
(head, in_mention_span, head_index) = \
mention_property_computer.compute_head_information(attributes)
attributes["head"] = head
attributes["head_span"] = spans.Span(
span.begin + in_mention_span.begin,
span.begin + in_mention_span.end
)
attributes["head_index"] = head_index
attributes["type"] = mention_property_computer.get_type(attributes)
attributes["fine_type"] = mention_property_computer.get_fine_type(
attributes)
if attributes["type"] == "PRO":
attributes["citation_form"] = \
mention_property_computer.get_citation_form(
attributes)
attributes["number"] = \
mention_property_computer.compute_number(attributes)
attributes["gender"] = \
mention_property_computer.compute_gender(attributes)
attributes["semantic_class"] = \
mention_property_computer.compute_semantic_class(attributes)
attributes["head_as_lowercase_string"] = " ".join(attributes[
"head"]).lower()
attributes["tokens_as_lowercase_string"] = " ".join(attributes[
"tokens"]).lower()
dep_tree = document.dep[i]
index = span.begin + head_index - sentence_span.begin
governor_id = dep_tree[index].head - 1
if governor_id == -1:
attributes["governor"] = "NONE"
else:
attributes["governor"] = dep_tree[governor_id].form.lower()
attributes["ancestry"] = Mention._get_ancestry(dep_tree, index)
attributes["deprel"] = dep_tree[index].deprel
return Mention(document, span, attributes)
@staticmethod
def _get_ancestry(dep_tree, index, level=0):
if level >= 2:
return ""
else:
governor_id = dep_tree[index].head - 1
direction = "L"
if governor_id > index:
direction = "R"
if governor_id == -1:
return "-" + direction + "-NONE"
else:
return "-" + direction + "-" + dep_tree[governor_id].pos + \
Mention._get_ancestry(dep_tree, governor_id, level+1)
def __lt__(self, other):
""" Check whether this mention is less than another mention.
``self < other`` if and only if ``self.span < other.span``, that is,
- this mention has span None (is a dummy mention), and the other
mention has a span which is not None, or
- this mentions begins before the other mention, or
- the mentions begin at the same position, but this mention ends
before the other mention.
Args:
other (Mention): A mention.
Returns:
True if this mention is less than other, False otherwise.
"""
if self.span is None:
return other.span is not None
elif other.span is None:
return False
else:
return self.span < other.span
def __eq__(self, other):
""" Check for equality.
Two mentions are equal if they are in the same document and have the
same span.
Args:
other (Mention): A mention.
Returns:
True if the mentions are in the same document and have the same
span.
"""
if isinstance(other, self.__class__):
return self.span == other.span and self.document == other.document
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self.document is None:
return hash((self.span.begin, self.span.end))
elif self.span is None:
return hash(self.document.identifier)
else:
return hash((self.document.identifier,
self.span.begin,
self.span.end))
def __str__(self):
return (repr(self.document) +
", " +
str(self.span) +
": "
+ " ".join(self.attributes["tokens"]))
def __repr__(self):
return (repr(self.document) +
", " +
str(self.span) +
": " +
str(self.attributes["tokens"]))
def get_context(self, window):
""" Get the context in a window around the mention.
Args:
window (int): An integer specifying the size of the window.
Returns:
list(str): The tokens in a window of around the mention.
In particular, get ``window`` tokens to the right or left of the
        mention, depending on the sign of ``window``: if the sign is +,
then to the right, if the sign is -, then to the left. Return
None if the window is not contained in the document.
"""
if window < 0 <= window + self.span.begin:
return self.document.tokens[
self.span.begin + window:self.span.begin]
elif (window > 0 and self.span.end + window + 1
<= len(self.document.tokens)):
return self.document.tokens[
self.span.end + 1:self.span.end + window + 1]
def is_coreferent_with(self, m):
""" Return whether this mention is coreferent with another mention.
Args:
m (Mention): Another mention.
Returns:
True if m and this mention are coreferent (are in the same document
and have the same annotated set id), False otherwise.
"""
self_set_id = self.attributes['annotated_set_id']
m_set_id = m.attributes['annotated_set_id']
if self.document is None and m.document is None:
return self_set_id is not None and self_set_id == m_set_id
elif self.is_dummy():
return m.is_dummy()
elif m.is_dummy():
return self.is_dummy()
else:
return self.document == m.document \
and self_set_id is not None \
and self_set_id == m_set_id
def decision_is_consistent(self, m):
""" Return whether the decision to put this mention and m into the
same entity is consistent with the gold annotation.
The decision is consistent if one of the following conditions holds:
- the mentions are coreferent,
- one of the mentions is the dummy mention, and the other mention
does not have a preceding mention that it is coreferent with.
Args:
m (Mention): Another mention.
Returns:
True if m and this mention are consistent according to the
definition above, False otherwise.
"""
if self.is_coreferent_with(m):
return True
elif self.is_dummy():
return m.attributes['annotated_set_id'] is None \
or m.attributes["first_in_gold_entity"]
elif m.is_dummy():
return self.attributes['annotated_set_id'] is None \
or self.attributes["first_in_gold_entity"]
else:
return False
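# Usage sketch (assumes a parsed CoNLLDocument `doc` is available):
#   m = Mention.from_document(spans.Span(3, 4), doc)
#   m.attributes["head"]    # head words of the mention
#   m.get_context(-2)       # two tokens of left context, or None at the edge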
|
{
"content_hash": "74b9c56dbb65020be8ed73095137bdb9",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 80,
"avg_line_length": 36.90710382513661,
"alnum_prop": 0.5542641397690258,
"repo_name": "smartschat/cort",
"id": "0ca80f734725b357a6e35cb8e0c067dc718bc118",
"size": "13508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cort/core/mentions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4234"
},
{
"name": "Java",
"bytes": "324927"
},
{
"name": "JavaScript",
"bytes": "475404"
},
{
"name": "Perl",
"bytes": "165676"
},
{
"name": "Python",
"bytes": "420807"
},
{
"name": "R",
"bytes": "4022"
}
],
"symlink_target": ""
}
|
import sys
from webtest import TestApp
from minimal2.application import make_wsgi
class TestAltStack(object):
@classmethod
def setup_class(cls):
try:
del sys.modules['minimal2.views']
del sys.modules['minimal2.components.news.views']
del sys.modules['newscomp4.views']
except KeyError:
pass
cls.wsgiapp = make_wsgi('Dispatching', use_session=False)
cls.ta = TestApp(cls.wsgiapp)
def test_workingview(self):
r = self.ta.get('/workingview')
r.mustcontain('hello foo!')
def test_no_session(self):
r = self.ta.get('/nosession')
r.mustcontain('hello nosession!')
def test_forward(self):
r = self.ta.get('/page1')
r.mustcontain('page2!')
def test_asview_from_component(self):
# internal component
r = self.ta.get('/news')
r.mustcontain('min2 news index')
# external component
r = self.ta.get('/news/display')
r.mustcontain('np4 display')
class TestAltStackWithSession(object):
@classmethod
def setup_class(cls):
try:
del sys.modules['minimal2.views']
except KeyError:
pass
cls.wsgiapp = make_wsgi('Dispatching')
cls.ta = TestApp(cls.wsgiapp)
def test_hassession(self):
r = self.ta.get('/hassession')
r.mustcontain('hello hassession!')
def test_session_saves(self):
self.ta.get('/session1')
self.ta.get('/session2')
# get a new ta so that the cookie is different
nta = TestApp(self.wsgiapp)
nta.get('/session3')
|
{
"content_hash": "0f0a5eb95865662aa6affd618911bd17",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 65,
"avg_line_length": 25.015151515151516,
"alnum_prop": 0.5923682616596002,
"repo_name": "level12/blazeweb",
"id": "97dd40a56eba4a6b811931c03eebe716bd8d6156",
"size": "1651",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_dispatching.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "544163"
},
{
"name": "JavaScript",
"bytes": "88"
},
{
"name": "Python",
"bytes": "434794"
}
],
"symlink_target": ""
}
|
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
try:
from onshape_client.oas.models import bt_feature_state1688
except ImportError:
bt_feature_state1688 = sys.modules["onshape_client.oas.models.bt_feature_state1688"]
try:
from onshape_client.oas.models import btm_assembly_feature887
except ImportError:
btm_assembly_feature887 = sys.modules[
"onshape_client.oas.models.btm_assembly_feature887"
]
class BTAssemblyFeatureListResponse1174AllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"feature_states": (
{str: (bt_feature_state1688.BTFeatureState1688,)},
), # noqa: E501
"features": (
[btm_assembly_feature887.BTMAssemblyFeature887],
), # noqa: E501
"is_complete": (bool,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"feature_states": "featureStates", # noqa: E501
"features": "features", # noqa: E501
"is_complete": "isComplete", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_assembly_feature_list_response1174_all_of.BTAssemblyFeatureListResponse1174AllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
feature_states ({str: (bt_feature_state1688.BTFeatureState1688,)}): [optional] # noqa: E501
features ([btm_assembly_feature887.BTMAssemblyFeature887]): [optional] # noqa: E501
is_complete (bool): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
{
"content_hash": "9c3d2c00c978ae5a1bfeb27ce4b2ce21",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 123,
"avg_line_length": 34.63636363636363,
"alnum_prop": 0.5879265091863517,
"repo_name": "onshape-public/onshape-clients",
"id": "a0044d4d2040f52672103940d819406f072553f5",
"size": "5732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/onshape_client/oas/models/bt_assembly_feature_list_response1174_all_of.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Go",
"bytes": "59674"
},
{
"name": "HTML",
"bytes": "3851790"
},
{
"name": "JavaScript",
"bytes": "2217"
},
{
"name": "Makefile",
"bytes": "559"
},
{
"name": "Python",
"bytes": "7560009"
},
{
"name": "Shell",
"bytes": "3475"
},
{
"name": "TypeScript",
"bytes": "1412661"
}
],
"symlink_target": ""
}
|
r"""Mako template to Hjson register description
"""
import sys
import argparse
from io import StringIO
from mako.template import Template
def main():
parser = argparse.ArgumentParser(prog="reg_timer")
parser.add_argument(
'input',
nargs='?',
metavar='file',
type=argparse.FileType('r'),
default=sys.stdin,
help='input template file')
parser.add_argument('--harts', '-s', type=int, help='Number of Harts')
parser.add_argument(
'--timers',
'-t',
type=int,
default=1,
help='Number of Timers in a Hart. Maximum up to 32')
args = parser.parse_args()
if args.timers > 32:
raise Exception("OOB TIMERS")
    # Render into an in-memory buffer, then write the result to stdout
out = StringIO()
reg_tpl = Template(args.input.read())
out.write(reg_tpl.render(harts=args.harts, timers=args.timers))
print(out.getvalue())
out.close()
if __name__ == "__main__":
main()
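# Example invocation (a sketch; the template file name is an assumption):
#   python reg_timer.py rv_timer.hjson.tpl --harts 1 --timers 2 > rv_timer.hjson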
|
{
"content_hash": "f53f707e66d4fc142411caf36189d79e",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 74,
"avg_line_length": 23.186046511627907,
"alnum_prop": 0.6028084252758275,
"repo_name": "chipsalliance/Surelog",
"id": "420906058fe00846ad671ec0faf94a345313f46a",
"size": "1167",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/tests/Opentitan/hw/ip/rv_timer/util/reg_timer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "155641"
},
{
"name": "C",
"bytes": "3114"
},
{
"name": "C++",
"bytes": "2808920"
},
{
"name": "CMake",
"bytes": "41750"
},
{
"name": "Forth",
"bytes": "81"
},
{
"name": "Makefile",
"bytes": "4820"
},
{
"name": "Nix",
"bytes": "784"
},
{
"name": "Python",
"bytes": "110922"
},
{
"name": "SWIG",
"bytes": "351"
},
{
"name": "Shell",
"bytes": "1349"
},
{
"name": "Slash",
"bytes": "37570"
},
{
"name": "SystemVerilog",
"bytes": "872314"
},
{
"name": "Tcl",
"bytes": "68865"
},
{
"name": "V",
"bytes": "1092"
},
{
"name": "Verilog",
"bytes": "495242"
}
],
"symlink_target": ""
}
|
import itk
import re
import sys
from itk.support.template_class import itkTemplate
from optparse import OptionParser
ctypes = [
"double",
"float",
"signed_char",
"signed_short",
"signed_long",
"unsigned_char",
"unsigned_short",
"unsigned_long",
"char",
"short",
"long",
"bool",
"int",
"unsigned_int",
"void",
]
excludedMethodsList = [
"Delete",
"Unregister",
"SetReferenceCount",
# the method broken for all filters
"PushBackInput",
"PushFrontInput",
"GraftOutput",
"SetInput",
"UpdateOutputData",
"PropagateRequestedRegion",
"EnlargeOutputRequestedRegion",
# functor are not wrapped so exclude GetFunctor
"GetFunctor",
#'UnCreateAllInstanceRegister',
#'Update',
#'UpdateLargestPossibleRegion',
#'GenerateInputRequestedRegion',
#'GenerateOutputInformation',
#'GenerateData',
#'GetImageIO',
#'UpdateOutputData',
#'UpdateOutputInformation',
#'GraftOutput',
#'AddSeed',
#'GetInputs',
#'GetOutputs',
#'CreateAllInstance'
]
excludedMethodsWithParamList = [
#'GetElement',
#'RegisterFactory',
#'UnRegisterFactory',
#'PropagateRequestedRegion',
#'Resize',
#'PadByRadius'
]
excludedClasses = [
#'BinaryBallStructuringElement',
#'Neighborhood',
#'BSplineDecompositionImageFilter',
#'AtanImageFilter',
#'Atan2ImageFilter',
#'ChangeInformationImageFilter',
#'GeodesicActiveContourLevelSetImageFilter',
#'Image',
#'ImageBase',
#'ImageFileReader',
#'ImageFileWriter',
#'ImageRegistrationMethod',
#'BSplineDownsampleImageFilter',
#'BSplineUpsampleImageFilter',
#'CropImageFilter',
#'LevelSetFunction',
#'MattesMutualInformationImageToImageMetric',
#'MeanReciprocalSquareDifferenceImageToImageMetric',
#'MeanSquaresImageToImageMetric',
#'MultiResolutionImageRegistrationMethod',
#'MultiResolutionPyramidImageFilter',
#'MutualInformationImageToImageMetric',
#'NormalizedCorrelationImageToImageMetric',
#'ParallelSparseFieldLevelSetImageFilter',
#'PyBuffer',
#'RawImageIO',
#'RecursiveMultiResolutionPyramidImageFilter',
#'ResampleImageFilter',
#'RescaleIntensityImageFilter',
#'SegmentationLevelSetImageFilter',
#'ShapeDetectionLevelSetImageFilter',
#'SparseFieldFourthOrderLevelSetImageFilter',
#'SparseFieldLevelSetImageFilter',
#'SpatialObject',
#'SpatialObjectTreeNode',
#'WatershedImageFilter',
#'TreeNode',
#'ThresholdSegmentationLevelSetImageFilter',
#'PointSet',
#'ImageConstIterator',
#'ImageConstIteratorWithIndex',
#'ImageIterator',
#'ImageIteratorWithIndex',
#'ImageLinearConstIteratorWithIndex',
#'ImageLinearIteratorWithIndex',
#'ImageRandomConstIteratorWithIndex',
#'ImageRandomIteratorWithIndex',
#'ImageRandomNonRepeatingConstIteratorWithIndex',
#'ImageRandomNonRepeatingIteratorWithIndex',
#'ImageRegionConstIterator',
#'ImageRegionIterator',
#'ImageRegionConstIteratorWithIndex',
#'ImageRegionIteratorWithIndex',
#'InterpolateImageFilter',
#'IsolatedConnectedImageFilter',
"SmartPointer",
"BMPImageIO",
]
def log(s, level):
if level <= options.verbose:
        print(s, file=logFile)
logFile.flush()
def cleanType(s):
i = s.index("_", 1)
return s[i + 1 :]
def addUnwrappedType(s):
s = cleanType(s)
    if s not in unwrappedTypes:
unwrappedTypes.add(s)
log(s, 0)
def exploreTpl(tpl):
for cl in tpl.values():
exploreMethods(cl, cl)
        # try to instantiate the class
try:
obj = cl.New()
exploreMethods(obj, cl)
except:
pass
try:
exploreMethods(cl(), cl)
except:
pass
def exploreMethods(obj, cl):
    ls = excludedMethodsList
    attrNameList = sorted(
        [i for i in dir(obj)
         if isinstance(i, str) and i[0].isupper() and i not in ls]
    )
for attrName in attrNameList:
log(" + " + attrName, 2)
try:
parameters = repr((cl.__name__, attrName))
if parameters not in exclude:
log(parameters, 4)
exec(f"s = obj.{attrName}()")
if isUnwrappedTypeString(s):
addUnwrappedType(s)
log(" - " + cleanType(s), 5)
except:
# try with some parameters
if attrName not in excludedMethodsWithParamList:
for param in [0, "", False, None]:
parameters = repr((cl.__name__, attrName, param))
if parameters not in exclude:
log(" * " + repr(param), 3)
log(parameters, 4)
try:
exec(f"s = obj.{attrName}(param)")
if isUnwrappedTypeString(s):
addUnwrappedType(s)
log(" - " + cleanType(s), 5)
except:
pass
def isUnwrappedTypeString(s):
if not isinstance(s, str):
return False
if not s[0] == "_":
return False
for t in ctypes:
if re.match(f"^_[0-9a-z]+_p_{t}$", s):
return False
return True
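# Illustrative values: "_p_itkImageF2" starts with "_" and matches none of the
# basic ctype patterns, so it is reported as an unwrapped type, whereas
# "_0f_p_unsigned_char" matches "^_[0-9a-z]+_p_unsigned_char$" and is skipped.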
parser = OptionParser(usage="usage: %prog")
parser.add_option("--exclude", dest="exclude", default=None, metavar="FILE", help="")
parser.add_option("--log-file", dest="logFile", default="-", metavar="FILE", help="")
parser.add_option(
"--start-from", dest="startFrom", default=None, metavar="CLASS", help=""
)
parser.add_option("-v", "--verbose", dest="verbose", default=0, type="int", help="")
(options, args) = parser.parse_args()
if options.logFile == "-":
logFile = sys.stdout
else:
    logFile = open(options.logFile, "w")
exclude = set()
if options.exclude:
    exclude = set(open(options.exclude).read().splitlines())
val = [i for i in dir(itk) if i[0].isupper() and len(i) > 2]
attrNameList = sorted(set(val) - set(excludedClasses))
if options.startFrom:
attrNameList = attrNameList[attrNameList.index(options.startFrom) :]
unwrappedTypes = set()
for name in attrNameList:
exec("attr = itk." + name)
# attr = itk.__dict__[name]
log(name, 1)
if isinstance(attr, itkTemplate):
exploreTpl(attr)
else:
exploreMethods(attr, attr)
try:
exploreMethods(attr.New(), attr)
except:
pass
try:
exploreMethods(attr(), attr)
except:
pass
|
{
"content_hash": "f87f7b73024f124dcd84c43d2207e000",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 85,
"avg_line_length": 26.842741935483872,
"alnum_prop": 0.6062791047018177,
"repo_name": "vfonov/ITK",
"id": "9a02b64238b6c42ab0e224b1e4da4fe9ca2b2c89",
"size": "7412",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Wrapping/Generators/Python/Tests/returnedTypeCoverage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "463579"
},
{
"name": "C++",
"bytes": "35045408"
},
{
"name": "CMake",
"bytes": "1623319"
},
{
"name": "CSS",
"bytes": "17428"
},
{
"name": "HTML",
"bytes": "8370"
},
{
"name": "Java",
"bytes": "28281"
},
{
"name": "JavaScript",
"bytes": "1522"
},
{
"name": "Objective-C++",
"bytes": "5640"
},
{
"name": "Perl",
"bytes": "6029"
},
{
"name": "Python",
"bytes": "569543"
},
{
"name": "Ruby",
"bytes": "296"
},
{
"name": "SWIG",
"bytes": "66033"
},
{
"name": "Shell",
"bytes": "165002"
},
{
"name": "Tcl",
"bytes": "77628"
},
{
"name": "XSLT",
"bytes": "8634"
}
],
"symlink_target": ""
}
|
import time
from ginga import trcalc
from ginga.misc.Bunch import Bunch
from .CanvasMixin import CanvasMixin
__all__ = ['DrawingMixin']
class DrawingMixin(object):
"""The DrawingMixin is a mixin class that adds drawing capability for
some of the basic CanvasObject-derived types. The set_surface method is
used to associate a CanvasView object for layering on.
"""
def __init__(self):
assert isinstance(self, CanvasMixin), "Missing CanvasMixin class"
from .CanvasObject import drawCatalog
# For interactive drawing
self.candraw = False
self.dc = drawCatalog
# canvas objects which we know how to draw have an "idraw"
# class method
self.drawtypes = [key for key in self.dc.keys()
if hasattr(self.dc[key], 'idraw')]
self.drawtypes.sort()
self.t_drawtype = 'point'
self.t_drawparams = {}
# holds the drawing context
self._draw_cxt = None
# For interactive editing
self.canedit = False
# Set to False to disable drag moves except from move control pt
self.easymove = True
self._start_x = 0
self._start_y = 0
self._cp_index = None
self._edit_obj = None
self._edit_status = False
self._edit_detail = {}
self._pick_cur_objs = set([])
self._pick_sel_objs = set([])
# For modes
self._mode = 'draw'
self._mode_tbl = Bunch()
self.add_draw_mode(None)
self.add_draw_mode('draw', down=self.draw_start,
move=self.draw_motion, up=self.draw_stop,
poly_add=self.draw_poly_add,
poly_delete=self.draw_poly_delete)
self.add_draw_mode('edit', down=self.edit_start,
move=self.edit_motion, up=self.edit_stop,
poly_add=self.edit_poly_add,
poly_delete=self.edit_poly_delete)
self.add_draw_mode('pick', down=self.pick_start,
move=self.pick_motion, up=self.pick_stop,
hover=self.pick_hover, key=self.pick_key,
poly_add=self.edit_poly_add,
poly_delete=self.edit_poly_delete)
# For selection
self._selected = []
self.multi_select_ok = False
# this controls whether an object is automatically selected for
# editing immediately after being drawn
self.edit_follows_draw = False
self._process_time = 0.0
# time delta threshold for deciding whether to update the image
self._delta_time = 0.020
self._draw_obj = None
# NOTE: must be mixed in with a Callback.Callbacks
for name in ('draw-event', 'draw-down', 'draw-move', 'draw-up',
'cursor-down', 'cursor-up', 'cursor-move',
'draw-scroll', 'keydown-poly_add', 'keydown-poly_del',
'keydown-edit_del', 'edit-event',
'edit-select', 'drag-drop'):
self.enable_callback(name)
for name in ['key-down', 'key-up', 'btn-down', 'btn-move', 'btn-up',
'scroll', 'pinch', 'pan']:
self.enable_callback('%s-none' % (name))
def set_surface(self, viewer):
self.viewer = viewer
# Register this canvas for events of interest.
# Assumes we are mixed in with a canvas
canvas = self
# for legacy drawing via draw mode in Bindmap
canvas.add_callback('draw-down', self.draw_start, viewer)
canvas.add_callback('draw-move', self.draw_motion, viewer)
canvas.add_callback('draw-up', self.draw_stop, viewer)
canvas.add_callback('key-down-none', self._draw_op, 'key', viewer)
canvas.add_callback('keydown-poly_add', self._draw_op, 'poly_add',
viewer)
canvas.add_callback('keydown-poly_del', self._draw_op, 'poly_delete',
viewer)
canvas.add_callback('keydown-edit_del', self.edit_delete_cb, viewer)
#canvas.add_callback('draw-scroll', self._edit_rotate_cb, viewer)
#canvas.add_callback('draw-scroll', self._edit_scale_cb, viewer)
def register_for_cursor_drawing(self, viewer):
canvas = self
canvas.add_callback('cursor-down', self._draw_op, 'down', viewer)
canvas.add_callback('cursor-move', self._draw_op, 'move', viewer)
canvas.add_callback('cursor-up', self._draw_op, 'up', viewer)
canvas.set_callback('none-move', self._draw_op, 'hover', viewer)
##### MODE LOGIC #####
def add_draw_mode(self, name, **kwargs):
try:
bnch = self._mode_tbl[name]
except KeyError:
bnch = Bunch(name=name, **kwargs)
self._mode_tbl[name] = bnch
return bnch
def set_draw_mode(self, mode):
if mode not in self._mode_tbl:
modes = list(self._mode_tbl.keys())
raise ValueError("mode must be one of: %s" % (str(modes)))
self._mode = mode
if mode != 'edit':
self.clear_selected()
self.update_canvas()
def get_draw_mode(self):
return self._mode
def _draw_op(self, canvas, event, data_x, data_y, opn, viewer):
if viewer != event.viewer:
return False
mode = self._mode
# Hack to handle legacy drawing using draw mode in Bindmap
if self.is_drawing():
mode = 'draw'
try:
method = self._mode_tbl[mode][opn]
except KeyError:
return False
if method is not None:
return method(canvas, event, data_x, data_y, viewer)
return False
##### DRAWING LOGIC #####
def _draw_update(self, data_x, data_y, cxt, force_update=False):
obj = None
# update the context with current position
x, y = cxt.crdmap.data_to((data_x, data_y))
cxt.setvals(x=x, y=y, data_x=data_x, data_y=data_y)
draw_class = cxt.draw_class
if draw_class is None:
return False
obj = draw_class.idraw(self, cxt)
# update display every delta_time secs
if obj is not None:
obj.initialize(self, cxt.viewer, self.logger)
self._draw_obj = obj
if force_update or (time.time() - self._process_time > self._delta_time):
self.process_drawing()
return True
def draw_start(self, canvas, event, data_x, data_y, viewer):
if not self.candraw:
return False
self._draw_obj = None
self.clear_selected()
# get the drawing coordinate type (default 'data')
crdtype = self.t_drawparams.get('coord', 'data')
crdmap = viewer.get_coordmap(crdtype)
x, y = crdmap.data_to((data_x, data_y))
klass = self.dc.get(self.t_drawtype, None)
# create the drawing context
self._draw_cxt = Bunch(start_x=x, start_y=y, points=[(x, y)],
x=x, y=y, data_x=data_x, data_y=data_y,
drawparams=self.t_drawparams,
crdmap=crdmap, viewer=viewer,
draw_class=klass, logger=self.logger)
self._draw_update(data_x, data_y, self._draw_cxt, force_update=True)
return True
def draw_stop(self, canvas, event, data_x, data_y, viewer):
if not self.candraw:
return False
self._draw_update(data_x, data_y, self._draw_cxt)
obj, self._draw_obj = self._draw_obj, None
if obj is not None:
objtag = self.add(obj)
self.make_callback('draw-event', objtag)
if self.edit_follows_draw:
#self.set_draw_mode('edit')
self.edit_select(obj)
self.make_callback('edit-select', self._edit_obj)
return True
else:
self.process_drawing()
def draw_motion(self, canvas, event, data_x, data_y, viewer):
if not self.candraw:
return False
self._draw_update(data_x, data_y, self._draw_cxt)
return True
def draw_poly_add(self, canvas, event, data_x, data_y, viewer):
if not self.candraw:
return False
cxt = self._draw_cxt
if self.t_drawtype in ('polygon', 'freepolygon', 'path', 'freepath'):
x, y = cxt.crdmap.data_to((data_x, data_y))
cxt.points.append((x, y))
elif self.t_drawtype == 'beziercurve' and len(cxt.points) < 3:
x, y = cxt.crdmap.data_to((data_x, data_y))
cxt.points.append((x, y))
self._draw_update(data_x, data_y, cxt, force_update=True)
return True
def draw_poly_delete(self, canvas, event, data_x, data_y, viewer):
if not self.candraw:
return False
cxt = self._draw_cxt
if self.t_drawtype in ('polygon', 'freepolygon', 'path',
'freepath', 'beziercurve'):
if len(cxt.points) > 0:
cxt.points.pop()
self._draw_update(data_x, data_y, cxt, force_update=True)
return True
def is_drawing(self):
return self._draw_obj is not None
def enable_draw(self, tf):
self.candraw = tf
def set_drawcolor(self, colorname):
self.t_drawparams['color'] = colorname
def set_drawtype(self, drawtype, **drawparams):
if drawtype is not None:
drawtype = drawtype.lower()
assert drawtype in self.drawtypes, \
ValueError("Bad drawing type '%s': must be one of %s" % (
drawtype, self.drawtypes))
self.t_drawtype = drawtype
self.t_drawparams = drawparams.copy()
def get_drawtypes(self):
return self.drawtypes
def get_drawtype(self):
return self.t_drawtype
def get_draw_class(self, drawtype):
drawtype = drawtype.lower()
klass = self.dc[drawtype]
return klass
def get_draw_classes(self):
return self.dc
def get_drawparams(self):
return self.t_drawparams.copy()
def process_drawing(self):
self._process_time = time.time()
#self.redraw(whence=3)
self.update_canvas()
def register_canvas_type(self, name, klass):
drawtype = name.lower()
self.dc[drawtype] = klass
if drawtype not in self.drawtypes:
self.drawtypes.append(drawtype)
self.drawtypes.sort()
##### EDITING LOGIC #####
def get_edit_object(self):
return self._edit_obj
def is_editing(self):
        return self.get_edit_object() is not None
def enable_edit(self, tf):
self.canedit = tf
def _rot_xlate(self, obj, data_x, data_y):
# translate point back into non-rotated form
rot_deg = - obj.rot_deg
xoff, yoff = obj.get_center_pt()
data_x, data_y = trcalc.rotate_pt(data_x, data_y, rot_deg,
xoff=xoff, yoff=yoff)
return data_x, data_y
def _edit_update(self, data_x, data_y, viewer):
if (not self.canedit) or (self._cp_index is None):
return False
x, y = data_x, data_y
if self._cp_index < 0:
if self.easymove:
self._edit_obj.set_edit_point(0, (x - self._start_x,
y - self._start_y),
self._edit_detail)
else:
# special hack for objects that have rot_deg attribute
if hasattr(self._edit_obj, 'rot_deg') and (self._cp_index > 0):
x, y = self._rot_xlate(self._edit_obj, x, y)
self._edit_obj.set_edit_point(self._cp_index, (x, y),
self._edit_detail)
#self._edit_obj.sync_state()
if time.time() - self._process_time > self._delta_time:
self.process_drawing()
return True
def _is_editable(self, obj, pt, is_inside):
return is_inside and obj.editable
def _prepare_to_move(self, obj, data_x, data_y):
#print(("moving an object", obj.editable))
self.edit_select(obj)
self._cp_index = -1
ref_x, ref_y = self._edit_obj.get_reference_pt()
self._start_x, self._start_y = data_x - ref_x, data_y - ref_y
#print(("end moving an object", obj.editable))
def edit_start(self, canvas, event, data_x, data_y, viewer):
if not self.canedit:
return False
self._edit_tmp = self._edit_obj
self._edit_status = False
self._edit_detail = Bunch(viewer=viewer)
self._cp_index = None
#shift_held = 'shift' in event.modifiers
shift_held = False
selects = self.get_selected()
if len(selects) == 0:
#print("no objects already selected")
# <-- no objects already selected
# check for objects at this location
#print("getting items")
objs = canvas.select_items_at(viewer, (data_x, data_y),
test=self._is_editable)
#print("items: %s" % (str(objs)))
if len(objs) == 0:
# <-- no objects under cursor
return False
# pick top object
obj = objs[-1]
self._prepare_to_move(obj, data_x, data_y)
else:
self._edit_status = True
# Ugh. Check each selected object's control points
# for a match
contains = []
for obj in selects:
#print("editing: checking for cp")
edit_pts = obj.get_edit_points(viewer)
#print((self._edit_obj, edit_pts))
idx = obj.get_pt(viewer, edit_pts, (data_x, data_y),
obj.cap_radius)
if len(idx) > 0:
i = idx[0]
#print("editing cp #%d" % (i))
# editing a control point from an existing object
self._edit_obj = obj
self._cp_index = i
if hasattr(obj, 'rot_deg'):
x, y = self._rot_xlate(self._edit_obj, data_x, data_y)
else:
x, y = data_x, data_y
self._edit_detail.start_pos = (x, y)
obj.setup_edit(self._edit_detail)
self._edit_update(data_x, data_y, viewer)
return True
i = None
## if obj.contains_pt((data_x, data_y)):
## contains.append(obj)
# update: check if objects bbox contains this point
x1, y1, x2, y2 = obj.get_llur()
if (x1 <= data_x <= x2) and (y1 <= data_y <= y2):
contains.append(obj)
# <-- no control points match, is there an object that contains
# this point?
if len(contains) > 0:
# TODO?: make a compound object of contains and move it?
obj = contains[-1]
if self.is_selected(obj) and shift_held:
# deselecting object
self.select_remove(obj)
else:
self._prepare_to_move(obj, data_x, data_y)
## Compound = self.get_draw_class('compoundobject')
## c_obj = Compound(*self.get_selected())
## c_obj.inherit_from(obj)
## self._prepare_to_move(c_obj, data_x, data_y)
else:
# <-- user clicked outside any selected item's control pt
# and outside any selected item
if not shift_held:
self.clear_selected()
# see now if there is an unselected item at this location
objs = canvas.select_items_at(viewer, (data_x, data_y),
test=self._is_editable)
#print("new items: %s" % (str(objs)))
if len(objs) > 0:
# pick top object
obj = objs[-1]
#print(("top object", obj))
if self.num_selected() > 0:
#print("there are previously selected items")
# if there are already some selected items, then
# add this object to the selection, make a compound
# object
self.edit_select(obj)
Compound = self.get_draw_class('compoundobject')
c_obj = Compound(*self.get_selected())
c_obj.inherit_from(obj)
self._prepare_to_move(c_obj, data_x, data_y)
else:
# otherwise just start over with this new object
#print(("starting over"))
self._prepare_to_move(obj, data_x, data_y)
self.process_drawing()
return True
def edit_stop(self, canvas, event, data_x, data_y, viewer):
if not self.canedit:
return False
if (self._edit_tmp != self._edit_obj) or (
(self._edit_obj is not None) and
(self._edit_status != self.is_selected(self._edit_obj))):
# <-- editing status has changed
#print("making edit-select callback")
self.make_callback('edit-select', self._edit_obj)
if (self._edit_obj is not None) and (self._cp_index is not None):
# <-- an object has been edited
self._edit_update(data_x, data_y, viewer)
self._cp_index = None
self.make_callback('edit-event', self._edit_obj)
self._edit_obj.make_callback('edited')
return True
def edit_motion(self, canvas, event, data_x, data_y, viewer):
if not self.canedit:
return False
if (self._edit_obj is not None) and (self._cp_index is not None):
self._edit_update(data_x, data_y, viewer)
return True
return False
def edit_poly_add(self, canvas, event, data_x, data_y, viewer):
if not self.canedit:
return False
obj = self._edit_obj
if ((obj is not None) and self.is_selected(obj) and
(obj.kind in ('polygon', 'path'))):
self.logger.debug("checking points")
# determine which line we are adding a point to
points = list(obj.get_data_points())
if obj.kind == 'polygon':
points = points + [points[0]]
x0, y0 = points[0]
insert = None
for i in range(1, len(points[1:]) + 1):
x1, y1 = points[i]
self.logger.debug("checking line %d" % (i))
if obj.within_line(viewer, (data_x, data_y),
(x0, y0), (x1, y1), 8):
insert = i
break
x0, y0 = x1, y1
if insert is not None:
self.logger.debug("inserting point")
# Point near a line
pt = obj.crdmap.data_to((data_x, data_y))
obj.insert_pt(insert, pt)
self.process_drawing()
else:
self.logger.debug("cursor not near a line")
return True
def edit_poly_delete(self, canvas, event, data_x, data_y, viewer):
if not self.canedit:
return False
obj = self._edit_obj
if ((obj is not None) and self.is_selected(obj) and
(obj.kind in ('polygon', 'path'))):
self.logger.debug("checking points")
# determine which point we are deleting
points = list(obj.get_data_points())
delete = None
for i in range(len(points)):
x1, y1 = points[i]
self.logger.debug("checking vertex %d" % (i))
if obj.within_radius(viewer, (data_x, data_y), (x1, y1),
8):
delete = i
break
if delete is not None:
self.logger.debug("deleting point")
obj.delete_pt(delete)
self.process_drawing()
else:
self.logger.debug("cursor not near a point")
return True
def edit_rotate(self, delta_deg, viewer):
if self._edit_obj is None:
return False
self._edit_obj.rotate_by_deg([delta_deg])
self.process_drawing()
self.make_callback('edit-event', self._edit_obj)
return True
def _edit_rotate_cb(self, canvas, event, viewer, msg=True):
if not self.canedit or (viewer != event.viewer):
return False
bd = viewer.get_bindings()
amount = event.amount
if bd.get_direction(event.direction) == 'down':
amount = - amount
        return self.edit_rotate(amount, viewer)
def edit_scale(self, delta_x, delta_y, viewer):
if self._edit_obj is None:
return False
self._edit_obj.scale_by_factors((delta_x, delta_y))
self.process_drawing()
self.make_callback('edit-event', self._edit_obj)
return True
def _edit_scale_cb(self, canvas, event, viewer, msg=True):
if not self.canedit or (viewer != event.viewer):
return False
bd = viewer.get_bindings()
if bd.get_direction(event.direction) == 'down':
amount = 0.9
else:
amount = 1.1
        return self.edit_scale(amount, amount, viewer)
def edit_delete(self):
if (self._edit_obj is not None) and self.is_selected(self._edit_obj):
self.select_remove(self._edit_obj)
obj, self._edit_obj = self._edit_obj, None
self.delete_object(obj)
self.make_callback('edit-event', self._edit_obj)
return True
def edit_delete_cb(self, canvas, event, data_x, data_y, viewer):
if not self.canedit or (viewer != event.viewer):
return False
return self.edit_delete()
def edit_select(self, newobj):
if not self.canedit:
return False
if not self.multi_select_ok:
self.clear_selected()
# add new object to selection
self.select_add(newobj)
self._edit_obj = newobj
return True
##### SELECTION LOGIC #####
def _is_selectable(self, obj, x, y, is_inside):
return is_inside and obj.editable
#return is_inside
def is_selected(self, obj):
return obj in self._selected
def get_selected(self):
return self._selected
def num_selected(self):
return len(self._selected)
def clear_selected(self):
self._selected = []
def select_remove(self, obj):
try:
self._selected.remove(obj)
except Exception:
pass
def select_add(self, obj):
if obj not in self._selected:
self._selected.append(obj)
##### PICK LOGIC #####
def _do_pick(self, canvas, event, data_x, data_y, ptype, viewer):
# check for objects at this location
objs = canvas.select_items_at(viewer, (data_x, data_y))
picked = set(filter(lambda obj: obj.pickable, objs))
newly_out = self._pick_cur_objs - picked
newly_in = picked - self._pick_cur_objs
self._pick_cur_objs = picked
if ptype not in ('move', 'up'):
self._pick_sel_objs = picked
# leaving an object
for obj in newly_out:
pt = obj.crdmap.data_to((data_x, data_y))
obj.make_callback('pick-leave', canvas, event, pt)
# entering an object
for obj in newly_in:
pt = obj.crdmap.data_to((data_x, data_y))
obj.make_callback('pick-enter', canvas, event, pt)
# pick down/up
res = False
for obj in self._pick_sel_objs:
cb_name = 'pick-%s' % (ptype)
self.logger.debug("%s event in %s obj at x, y = %d, %d" % (
cb_name, obj.kind, data_x, data_y))
pt = obj.crdmap.data_to((data_x, data_y))
if obj.make_callback(cb_name, canvas, event, pt):
res = True
return res
def pick_start(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'down', viewer)
def pick_motion(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'move', viewer)
def pick_hover(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'hover', viewer)
def pick_key(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'key', viewer)
def pick_stop(self, canvas, event, data_x, data_y, viewer):
return self._do_pick(canvas, event, data_x, data_y,
'up', viewer)
# The canvas drawing
def draw(self, viewer):
# Draw everything else as usual
super(DrawingMixin, self).draw(viewer)
# Draw our current drawing object, if any
if self._draw_obj:
self._draw_obj.draw(viewer)
# Draw control points on edited objects
selected = list(self.get_selected())
if len(selected) > 0:
for obj in selected:
cr = viewer.renderer.setup_cr(obj)
obj.draw_edit(cr, viewer)
### NON-PEP8 EQUIVALENTS -- TO BE DEPRECATED ###
setSurface = set_surface
getDrawClass = get_draw_class
# END
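# A minimal sketch of the typical calling pattern for this mixin, assuming
# `canvas` is an object that already mixes DrawingMixin with CanvasMixin and
# Callback.Callbacks (e.g. ginga's DrawingCanvas) and `viewer` is a CanvasView;
# the names here are illustrative, not defined in this file:
#
#   canvas.set_surface(viewer)
#   canvas.register_for_cursor_drawing(viewer)
#   canvas.enable_draw(True)
#   canvas.enable_edit(True)
#   canvas.set_drawtype('rectangle', color='cyan')
#   canvas.set_draw_mode('draw')          # or 'edit' / 'pick'
#   canvas.add_callback('draw-event', lambda c, tag: print('drew', tag))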
|
{
"content_hash": "5c1259660a189545452ca916dfb7973e",
"timestamp": "",
"source": "github",
"line_count": 735,
"max_line_length": 85,
"avg_line_length": 35.477551020408164,
"alnum_prop": 0.5256557754256788,
"repo_name": "pllim/ginga",
"id": "678ab4f1240aa6d1ff7230b129f7177b3be04377",
"size": "26240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ginga/canvas/DrawingMixin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2781"
},
{
"name": "GLSL",
"bytes": "7344"
},
{
"name": "HTML",
"bytes": "2129"
},
{
"name": "JavaScript",
"bytes": "87198"
},
{
"name": "Jupyter Notebook",
"bytes": "2691970"
},
{
"name": "Makefile",
"bytes": "85"
},
{
"name": "Python",
"bytes": "4359761"
}
],
"symlink_target": ""
}
|
from flask import request, abort, jsonify, Flask
from werkzeug.contrib.cache import SimpleCache, RedisCache
from datetime import datetime
import pytz
import cPickle
import eetlijst
import calendar
import functools
# App definition
app = Flask(__name__)
app.debug = True
# Use simple cache for cli-mode. For WSGI mode use a shared cache.
if __name__ == "__main__":
cache = SimpleCache()
else:
cache = RedisCache("10.0.0.3", key_prefix="eetlijst")
def to_unix_timestamp(timestamp):
"""
    Convert a datetime object to a unix timestamp. Input is local time, the
    result is a UTC timestamp.
"""
if timestamp is not None:
return calendar.timegm(timestamp.utctimetuple())
def from_unix_timestamp(timestamp):
"""
Convert unix timestamp to datetime object. Input is a UTC timestamp, result
is local time.
"""
if timestamp is not None:
return datetime.fromtimestamp(int(timestamp), tz=pytz.UTC).astimezone(
eetlijst.TZ_LOCAL)
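# A quick round-trip sketch for the two helpers above (assumes pytz is
# available; the rendered local value depends on eetlijst.TZ_LOCAL):
#
#   dt = datetime(2014, 1, 1, tzinfo=pytz.UTC)
#   ts = to_unix_timestamp(dt)    # -> 1388534400
#   from_unix_timestamp(ts)       # -> the same instant, expressed in TZ_LOCAL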
def inject_client(func):
"""
Inject the Eetlijst client from cache, if available. Otherwise, create a new
one.
"""
@functools.wraps(func)
def _inner():
username = request.args.get("username")
password = request.args.get("password")
if not username or not password:
return abort(400)
# Fetch eetlijst client from cache
key = username + "-" + password
client = cache.get(key)
if client:
try:
client = cPickle.loads(client)
except cPickle.UnpicklingError:
client = None
if not client:
app.logger.debug("Creating new client")
try:
client = eetlijst.Eetlijst(username=username, password=password,
login=True)
except eetlijst.LoginError:
return abort(401)
else:
app.logger.debug("Continuing existing client")
# Invoke original method
try:
result = func(client)
# Store in cache again
cache.set(key, cPickle.dumps(client,
protocol=cPickle.HIGHEST_PROTOCOL), timeout=60)
except:
app.logger.debug("Client state NOT updated due to exception")
raise
return result
return _inner
@app.route("/info", methods=["GET"])
@inject_client
def get_info(client):
return jsonify({
"result": {
"name": client.get_name(),
"residents": client.get_residents()
}
})
@app.route("/status", methods=["GET"])
@inject_client
def get_status(client):
status_rows = client.get_statuses(limit=1)
return jsonify({
"result": [{
"statuses": [{
"value": status.value,
"last_changed": to_unix_timestamp(status.last_changed)
} for status in status_row.statuses ],
"deadline": to_unix_timestamp(status_row.deadline),
"timestamp": to_unix_timestamp(status_row.timestamp)
} for status_row in status_rows ]
})
@app.route("/status", methods=["POST"])
@inject_client
def set_status(client):
timestamp = from_unix_timestamp(request.args["timestamp"])
resident = request.args["resident"]
value = request.args["value"]
client.set_status(resident, value, timestamp)
return jsonify({
"result": True
})
@app.route("/noticeboard", methods=["GET"])
@inject_client
def get_noticeboard(client):
return jsonify({
"result": client.get_noticeboard()
})
@app.route("/noticeboard", methods=["POST"])
@inject_client
def set_noticeboard(client):
client.set_noticeboard(request.args["content"])
return jsonify({
"result": True
})
# E.g. `python server.py'
if __name__ == '__main__':
app.run()
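# Example requests against a locally running instance (the credentials below
# are placeholders; with `app.run()` the server listens on http://127.0.0.1:5000):
#
#   curl 'http://127.0.0.1:5000/status?username=demo&password=secret'
#   curl -X POST 'http://127.0.0.1:5000/noticeboard?username=demo&password=secret&content=Hello'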
|
{
"content_hash": "5d8cf3b4e67a21edad597b008a2b07d9",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 80,
"avg_line_length": 25.945945945945947,
"alnum_prop": 0.60546875,
"repo_name": "basilfx/Happening-eetlijst",
"id": "68c465377d35531e9e16d6581c54995311f17a29",
"size": "3840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "16132"
},
{
"name": "Python",
"bytes": "3840"
},
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
"""
wakatime.base
~~~~~~~~~~~~~
wakatime module entry point.
:copyright: (c) 2013 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import base64
import logging
import os
import platform
import re
import sys
import time
import traceback
import socket
try:
import ConfigParser as configparser
except ImportError:
import configparser
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'packages'))
from .__about__ import __version__
from .compat import u, open, is_py3
from .logger import setup_logging
from .offlinequeue import Queue
from .packages import argparse
from .packages import simplejson as json
from .packages.requests.exceptions import RequestException
from .project import get_project_info
from .session_cache import SessionCache
from .stats import get_file_stats
try:
from .packages import tzlocal
except:
from .packages import tzlocal3 as tzlocal
log = logging.getLogger('WakaTime')
class FileAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
values = os.path.realpath(values)
setattr(namespace, self.dest, values)
def upgradeConfigFile(configFile):
"""For backwards-compatibility, upgrade the existing config file
to work with configparser and rename from .wakatime.conf to .wakatime.cfg.
"""
if os.path.isfile(configFile):
# if upgraded cfg file already exists, don't overwrite it
return
oldConfig = os.path.join(os.path.expanduser('~'), '.wakatime.conf')
try:
configs = {
'ignore': [],
}
with open(oldConfig, 'r', encoding='utf-8') as fh:
for line in fh.readlines():
line = line.split('=', 1)
if len(line) == 2 and line[0].strip() and line[1].strip():
if line[0].strip() == 'ignore':
configs['ignore'].append(line[1].strip())
else:
configs[line[0].strip()] = line[1].strip()
with open(configFile, 'w', encoding='utf-8') as fh:
fh.write("[settings]\n")
for name, value in configs.items():
if isinstance(value, list):
fh.write("%s=\n" % name)
for item in value:
fh.write(" %s\n" % item)
else:
fh.write("%s = %s\n" % (name, value))
os.remove(oldConfig)
except IOError:
pass
def parseConfigFile(configFile=None):
"""Returns a configparser.SafeConfigParser instance with configs
read from the config file. Default location of the config file is
at ~/.wakatime.cfg.
"""
if not configFile:
configFile = os.path.join(os.path.expanduser('~'), '.wakatime.cfg')
upgradeConfigFile(configFile)
configs = configparser.SafeConfigParser()
try:
with open(configFile, 'r', encoding='utf-8') as fh:
try:
configs.readfp(fh)
except configparser.Error:
print(traceback.format_exc())
return None
except IOError:
print(u('Error: Could not read from config file {0}').format(u(configFile)))
return configs
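# A minimal example of the config file layout this function expects
# (the api_key value is a placeholder):
#
#   [settings]
#   api_key = 00000000-0000-0000-0000-000000000000
#   debug = false
#   ignore =
#       COMMIT_EDITMSG$
#       TAG_EDITMSG$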
def parseArguments(argv):
"""Parse command line arguments and configs from ~/.wakatime.cfg.
Command line arguments take precedence over config file settings.
Returns instances of ArgumentParser and SafeConfigParser.
"""
try:
sys.argv
except AttributeError:
sys.argv = argv
# define supported command line arguments
parser = argparse.ArgumentParser(
description='Common interface for the WakaTime api.')
parser.add_argument('--file', dest='targetFile', metavar='file',
action=FileAction, required=True,
help='absolute path to file for current heartbeat')
parser.add_argument('--key', dest='key',
help='your wakatime api key; uses api_key from '+
                        '~/.wakatime.cfg by default')
parser.add_argument('--write', dest='isWrite',
action='store_true',
help='when set, tells api this heartbeat was triggered from '+
'writing to a file')
parser.add_argument('--plugin', dest='plugin',
help='optional text editor plugin name and version '+
'for User-Agent header')
parser.add_argument('--time', dest='timestamp', metavar='time',
type=float,
help='optional floating-point unix epoch timestamp; '+
'uses current time by default')
parser.add_argument('--lineno', dest='lineno',
help='optional line number; current line being edited')
parser.add_argument('--cursorpos', dest='cursorpos',
help='optional cursor position in the current file')
parser.add_argument('--notfile', dest='notfile', action='store_true',
help='when set, will accept any value for the file. for example, '+
'a domain name or other item you want to log time towards.')
parser.add_argument('--proxy', dest='proxy',
help='optional https proxy url; for example: '+
'https://user:pass@localhost:8080')
parser.add_argument('--project', dest='project',
help='optional project name')
parser.add_argument('--alternate-project', dest='alternate_project',
help='optional alternate project name; auto-discovered project takes priority')
parser.add_argument('--workplace', dest='workplace', help='workplace you are currently logging.')
parser.add_argument('--disableoffline', dest='offline',
action='store_false',
help='disables offline time logging instead of queuing logged time')
parser.add_argument('--hidefilenames', dest='hidefilenames',
action='store_true',
help='obfuscate file names; will not send file names to api')
parser.add_argument('--exclude', dest='exclude', action='append',
help='filename patterns to exclude from logging; POSIX regex '+
'syntax; can be used more than once')
parser.add_argument('--include', dest='include', action='append',
help='filename patterns to log; when used in combination with '+
'--exclude, files matching include will still be logged; '+
'POSIX regex syntax; can be used more than once')
parser.add_argument('--ignore', dest='ignore', action='append',
help=argparse.SUPPRESS)
parser.add_argument('--logfile', dest='logfile',
help='defaults to ~/.wakatime.log')
parser.add_argument('--apiurl', dest='api_url',
help='heartbeats api url; for debugging with a local server')
parser.add_argument('--config', dest='config',
                        help='defaults to ~/.wakatime.cfg')
parser.add_argument('--verbose', dest='verbose', action='store_true',
help='turns on debug messages in log file')
parser.add_argument('--version', action='version', version=__version__)
# parse command line arguments
args = parser.parse_args(args=argv[1:])
# use current unix epoch timestamp by default
if not args.timestamp:
args.timestamp = time.time()
# parse ~/.wakatime.cfg file
configs = parseConfigFile(args.config)
if configs is None:
return args, configs
# update args from configs
if not args.key:
default_key = None
if configs.has_option('settings', 'api_key'):
default_key = configs.get('settings', 'api_key')
elif configs.has_option('settings', 'apikey'):
default_key = configs.get('settings', 'apikey')
if default_key:
args.key = default_key
else:
parser.error('Missing api key')
if not args.exclude:
args.exclude = []
if configs.has_option('settings', 'ignore'):
try:
for pattern in configs.get('settings', 'ignore').split("\n"):
if pattern.strip() != '':
args.exclude.append(pattern)
except TypeError:
pass
if configs.has_option('settings', 'exclude'):
try:
for pattern in configs.get('settings', 'exclude').split("\n"):
if pattern.strip() != '':
args.exclude.append(pattern)
except TypeError:
pass
if not args.include:
args.include = []
if configs.has_option('settings', 'include'):
try:
for pattern in configs.get('settings', 'include').split("\n"):
if pattern.strip() != '':
args.include.append(pattern)
except TypeError:
pass
if args.offline and configs.has_option('settings', 'offline'):
args.offline = configs.getboolean('settings', 'offline')
if not args.hidefilenames and configs.has_option('settings', 'hidefilenames'):
args.hidefilenames = configs.getboolean('settings', 'hidefilenames')
if not args.proxy and configs.has_option('settings', 'proxy'):
args.proxy = configs.get('settings', 'proxy')
if not args.verbose and configs.has_option('settings', 'verbose'):
args.verbose = configs.getboolean('settings', 'verbose')
if not args.verbose and configs.has_option('settings', 'debug'):
args.verbose = configs.getboolean('settings', 'debug')
if not args.logfile and configs.has_option('settings', 'logfile'):
args.logfile = configs.get('settings', 'logfile')
if not args.api_url and configs.has_option('settings', 'api_url'):
args.api_url = configs.get('settings', 'api_url')
return args, configs
def should_exclude(fileName, include, exclude):
if fileName is not None and fileName.strip() != '':
try:
for pattern in include:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(fileName):
return False
except re.error as ex:
log.warning(u('Regex error ({msg}) for include pattern: {pattern}').format(
msg=u(ex),
pattern=u(pattern),
))
except TypeError:
pass
try:
for pattern in exclude:
try:
compiled = re.compile(pattern, re.IGNORECASE)
if compiled.search(fileName):
return pattern
except re.error as ex:
log.warning(u('Regex error ({msg}) for exclude pattern: {pattern}').format(
msg=u(ex),
pattern=u(pattern),
))
except TypeError:
pass
return False
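# A quick sketch of the precedence implemented above (patterns are illustrative):
#
#   should_exclude('/tmp/secret.txt', include=[], exclude=[r'secret'])
#   # -> 'secret'   (the matching exclude pattern is returned)
#
#   should_exclude('/tmp/secret.txt', include=[r'\.txt$'], exclude=[r'secret'])
#   # -> False      (a matching include pattern short-circuits any exclude)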
def get_user_agent(plugin):
ver = sys.version_info
python_version = '%d.%d.%d.%s.%d' % (ver[0], ver[1], ver[2], ver[3], ver[4])
user_agent = u('wakatime/{ver} ({platform}) Python{py_ver}').format(
ver=u(__version__),
platform=u(platform.platform()),
py_ver=python_version,
)
if plugin:
user_agent = u('{user_agent} {plugin}').format(
user_agent=user_agent,
plugin=u(plugin),
)
else:
user_agent = u('{user_agent} Unknown/0').format(
user_agent=user_agent,
)
return user_agent
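# Example of the resulting header value (version numbers and platform string
# below are illustrative, not taken from this module):
#
#   get_user_agent('vim-wakatime/4.0.0')
#   # -> 'wakatime/4.1.0 (Linux-3.13.0-generic-x86_64-with-Ubuntu-14.04-trusty) '
#   #    'Python2.7.6.final.0 vim-wakatime/4.0.0'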
def get_hostname():
return socket.gethostname()
def send_heartbeat(project=None, branch=None, workplace=None, stats={}, key=None, targetFile=None,
timestamp=None, isWrite=None, plugin=None, offline=None, notfile=False,
hidefilenames=None, proxy=None, api_url=None, **kwargs):
"""Sends heartbeat as POST request to WakaTime api server.
"""
if not api_url:
api_url = 'https://wakatime.com/api/v1/heartbeats'
log.debug('Sending heartbeat to api at %s' % api_url)
data = {
'time': timestamp,
'entity': targetFile,
'type': 'file',
}
if hidefilenames and targetFile is not None and not notfile:
data['entity'] = data['entity'].rsplit('/', 1)[-1].rsplit('\\', 1)[-1]
if len(data['entity'].strip('.').split('.', 1)) > 1:
data['entity'] = u('HIDDEN.{ext}').format(ext=u(data['entity'].strip('.').rsplit('.', 1)[-1]))
else:
data['entity'] = u('HIDDEN')
if stats.get('lines'):
data['lines'] = stats['lines']
if stats.get('language'):
data['language'] = stats['language']
if stats.get('dependencies'):
data['dependencies'] = stats['dependencies']
if stats.get('lineno'):
data['lineno'] = stats['lineno']
if stats.get('cursorpos'):
data['cursorpos'] = stats['cursorpos']
if isWrite:
data['is_write'] = isWrite
if project:
data['project'] = project
if branch:
data['branch'] = branch
log.debug(data)
# setup api request
request_body = json.dumps(data)
api_key = u(base64.b64encode(str.encode(key) if is_py3 else key))
auth = u('Basic {api_key}').format(api_key=api_key)
headers = {
'User-Agent': get_user_agent(plugin),
'Content-Type': 'application/json',
'Accept': 'application/json',
'Authorization': auth,
'X-Machine-Name': workplace
}
log.debug(headers)
proxies = {}
if proxy:
proxies['https'] = proxy
# add Olson timezone to request
try:
tz = tzlocal.get_localzone()
except:
tz = None
if tz:
headers['TimeZone'] = u(tz.zone)
session_cache = SessionCache()
session = session_cache.get()
# log time to api
response = None
try:
response = session.post(api_url, data=request_body, headers=headers,
proxies=proxies)
except RequestException:
exception_data = {
sys.exc_info()[0].__name__: u(sys.exc_info()[1]),
}
if log.isEnabledFor(logging.DEBUG):
exception_data['traceback'] = traceback.format_exc()
if offline:
queue = Queue()
queue.push(data, json.dumps(stats), plugin)
if log.isEnabledFor(logging.DEBUG):
log.warn(exception_data)
else:
log.error(exception_data)
else:
response_code = response.status_code if response is not None else None
response_content = response.text if response is not None else None
if response_code == 201:
log.debug({
'response_code': response_code,
})
session_cache.save(session)
return True
if offline:
if response_code != 400:
queue = Queue()
queue.push(data, json.dumps(stats), plugin)
if response_code == 401:
log.error({
'response_code': response_code,
'response_content': response_content,
})
elif log.isEnabledFor(logging.DEBUG):
log.warn({
'response_code': response_code,
'response_content': response_content,
})
else:
log.error({
'response_code': response_code,
'response_content': response_content,
})
else:
log.error({
'response_code': response_code,
'response_content': response_content,
})
session_cache.delete()
return False
def main(argv=None):
if not argv:
argv = sys.argv
args, configs = parseArguments(argv)
if configs is None:
return 103 # config file parsing error
setup_logging(args, __version__)
exclude = should_exclude(args.targetFile, args.include, args.exclude)
if exclude is not False:
log.debug(u('File not logged because matches exclude pattern: {pattern}').format(
pattern=u(exclude),
))
return 0
if os.path.isfile(args.targetFile) or args.notfile:
stats = get_file_stats(args.targetFile, notfile=args.notfile,
lineno=args.lineno, cursorpos=args.cursorpos)
project, branch = None, None
if not args.notfile:
project, branch = get_project_info(configs=configs, args=args)
workplace = args.workplace
if not args.workplace:
workplace = get_hostname()
kwargs = vars(args)
kwargs['project'] = project
kwargs['branch'] = branch
kwargs['stats'] = stats
kwargs['workplace'] = workplace
if send_heartbeat(**kwargs):
queue = Queue()
while True:
heartbeat = queue.pop()
if heartbeat is None:
break
sent = send_heartbeat(
project=heartbeat['project'],
targetFile=heartbeat['file'],
timestamp=heartbeat['time'],
branch=heartbeat['branch'],
workplace=heartbeat['workplace'],
stats=json.loads(heartbeat['stats']),
key=args.key,
isWrite=heartbeat['is_write'],
plugin=heartbeat['plugin'],
offline=args.offline,
hidefilenames=args.hidefilenames,
notfile=args.notfile,
proxy=args.proxy,
api_url=args.api_url,
)
if not sent:
break
return 0 # success
return 102 # api error
else:
log.debug('File does not exist; ignoring this heartbeat.')
return 0
|
{
"content_hash": "1e59769a1fd3fb7670cbbeee23bf2eaf",
"timestamp": "",
"source": "github",
"line_count": 495,
"max_line_length": 106,
"avg_line_length": 36.448484848484846,
"alnum_prop": 0.5692273583859883,
"repo_name": "gandarez/wakatime",
"id": "b8d925c458edd5836942b367c17abe13c3828764",
"size": "18066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wakatime/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "106976"
},
{
"name": "Python",
"bytes": "6224176"
}
],
"symlink_target": ""
}
|
""" Permissions for profiles """
from rest_framework import permissions
from open_discussions.permissions import is_staff_user
def is_owner_or_privileged_user(obj_user, request):
"""Returns True if the given user matches the requesting user, or the given user is superuser/staff"""
return (
obj_user == request.user or request.user.is_superuser or is_staff_user(request)
)
class HasEditPermission(permissions.BasePermission):
"""
    Only the profile's user, staff users, or superusers have permission to edit a profile.
"""
def has_object_permission(self, request, view, obj):
"""
        Only allow editing for the owner of the profile, staff users, or superusers
"""
if request.method in permissions.SAFE_METHODS:
return True
return is_owner_or_privileged_user(obj.user, request)
class HasSiteEditPermission(permissions.BasePermission):
"""
Permission class indicating the requesting user created a given UserWebsite or
is a superuser/staff user.
"""
def has_object_permission(
self, request, view, obj
): # pylint: disable=missing-docstring
if request.method in permissions.SAFE_METHODS:
return True
return is_owner_or_privileged_user(obj.profile.user, request)
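# A minimal sketch of wiring these permission classes into a DRF view
# (the viewset below is illustrative, not part of this module):
#
#   from rest_framework import viewsets
#
#   class ProfileViewSet(viewsets.ModelViewSet):
#       permission_classes = (HasEditPermission,)
#       ...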
|
{
"content_hash": "98ba1196a3e6c49633879e56b3eb1a7d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 106,
"avg_line_length": 31.78048780487805,
"alnum_prop": 0.6884113584036838,
"repo_name": "mitodl/open-discussions",
"id": "1895b6dc15484ea2f77357487c25d1f6cc1cec45",
"size": "1303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiles/permissions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1040"
},
{
"name": "HTML",
"bytes": "78316"
},
{
"name": "JavaScript",
"bytes": "1704037"
},
{
"name": "Procfile",
"bytes": "675"
},
{
"name": "Python",
"bytes": "2264549"
},
{
"name": "SCSS",
"bytes": "133442"
},
{
"name": "Shell",
"bytes": "11787"
},
{
"name": "TypeScript",
"bytes": "307134"
}
],
"symlink_target": ""
}
|
import collections
import numpy as np
import util
import tensorflow as tf
def _tf_int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _tf_float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _tf_bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
TFTD = collections.namedtuple('TFTD', ['dp_id', 'is_train',
'n_vars', 'n_clauses', 'CL_idxs',
'core_var_mask', 'core_clause_mask'])
def tfd_to_tftd(dp_id, is_train, tfd):
assert(0 < tfd.n_vars)
assert(0 < tfd.n_clauses)
assert(np.size(tfd.core_var_mask) == tfd.n_vars)
assert(np.any(tfd.core_var_mask))
assert(np.size(tfd.core_clause_mask) == tfd.n_clauses)
assert(np.any(tfd.core_clause_mask))
return TFTD(dp_id=dp_id,
is_train=is_train,
n_vars=tfd.n_vars,
n_clauses=tfd.n_clauses,
CL_idxs=tfd.CL_idxs,
core_var_mask=tfd.core_var_mask,
core_clause_mask=tfd.core_clause_mask)
def tftd_to_example(tftd):
return tf.train.Example(
features=tf.train.Features(
feature={
'dp_id' : _tf_int64_feature(tftd.dp_id),
'is_train' : _tf_int64_feature(np.int64(tftd.is_train)),
'n_vars' : _tf_int64_feature(tftd.n_vars),
'n_clauses' : _tf_int64_feature(tftd.n_clauses),
'n_cells' : _tf_int64_feature(np.shape(tftd.CL_idxs)[0]),
'CL_idxs' : _tf_bytes_feature(tftd.CL_idxs.astype(np.int32).tostring()),
'core_var_mask' : _tf_bytes_feature(tftd.core_var_mask.astype(np.int32).tostring()),
'core_clause_mask' : _tf_bytes_feature(tftd.core_clause_mask.astype(np.int32).tostring())
}))
def example_to_tftd(example):
features = tf.parse_single_example(
example,
features={
'dp_id' : tf.io.FixedLenFeature([], dtype=tf.int64),
'is_train' : tf.io.FixedLenFeature([], dtype=tf.int64),
'n_vars' : tf.io.FixedLenFeature([], dtype=tf.int64),
'n_clauses' : tf.io.FixedLenFeature([], dtype=tf.int64),
'n_cells' : tf.io.FixedLenFeature([], dtype=tf.int64),
'CL_idxs' : tf.io.FixedLenFeature([], dtype=tf.string),
'core_var_mask' : tf.io.FixedLenFeature([], dtype=tf.string),
'core_clause_mask' : tf.io.FixedLenFeature([], dtype=tf.string)
})
dp_id = features['dp_id']
is_train = features['is_train']
n_vars = features['n_vars']
n_clauses = features['n_clauses']
CL_idxs = tf.reshape(tf.decode_raw(features['CL_idxs'], tf.int32), [features['n_cells'], 2])
core_var_mask = tf.cast(tf.reshape(tf.decode_raw(features['core_var_mask'], tf.int32), [features['n_vars']]), tf.bool)
core_clause_mask = tf.cast(tf.reshape(tf.decode_raw(features['core_clause_mask'], tf.int32), [features['n_clauses']]), tf.bool)
asserts = [
tf.Assert(0 < n_vars, [n_vars], name="ASSERT_n_vars_pos"),
tf.Assert(0 < n_clauses, [n_clauses], name="ASSERT_n_clauses_pos"),
tf.Assert(tf.equal(tf.size(core_var_mask), tf.cast(n_vars, tf.int32)), [core_var_mask], name="CORE_VARS_N_VARS"),
tf.Assert(tf.reduce_any(core_var_mask), [core_var_mask], name="CORE_VARS_EXIST"),
tf.Assert(tf.equal(tf.size(core_clause_mask), tf.cast(n_clauses, tf.int32)), [core_clause_mask], name="CORE_CLAUSES_N_CLAUSES"),
tf.Assert(tf.reduce_any(core_clause_mask), [core_clause_mask], name="CORE_CLAUSES_EXIST")
]
with tf.control_dependencies(asserts):
return TFTD(dp_id=dp_id,
is_train=tf.cast(is_train, tf.bool),
n_vars=n_vars,
n_clauses=n_clauses,
CL_idxs=CL_idxs,
core_var_mask=core_var_mask,
core_clause_mask=core_clause_mask)
####################
def test_example_to_tftd():
from nose.tools import assert_equals, assert_true
import tempfile
tfropts = tf.io.TFRecordOptions(compression_type=tf.io.TFRecordCompressionType.GZIP)
out_file = tempfile.NamedTemporaryFile()
writer = tf.io.TFRecordWriter(out_file.name, options=tfropts)
N_EXAMPLES = 50
def sample_tftd():
n_vars = np.random.randint(10000)
n_clauses = np.random.randint(100000)
n_cells = np.random.randint(1000000)
return TFTD(dp_id=np.random.randint(10000),
is_train=util.flip(0.5),
n_vars=n_vars,
n_clauses=n_clauses,
CL_idxs=np.random.randint(n_clauses, size=(n_cells, 2), dtype=np.int32),
core_var_mask=(np.random.randint(2, size=(n_vars), dtype=np.int32) < 1),
core_clause_mask=(np.random.randint(2, size=(n_clauses), dtype=np.int32) < 1))
tftds = [sample_tftd() for _ in range(N_EXAMPLES)]
for tftd in tftds:
writer.write(tftd_to_example(tftd).SerializeToString())
writer.close()
dataset = tf.data.TFRecordDataset([out_file.name], compression_type="GZIP")
dataset = dataset.map(example_to_tftd)
next_tftd = dataset.make_one_shot_iterator().get_next()
sess = tf.Session()
for i in range(N_EXAMPLES):
tftd1 = tftds[i]
tftd2 = sess.run(next_tftd)
assert_equals(tftd1.dp_id, tftd2.dp_id)
assert_equals(tftd1.is_train, tftd2.is_train)
assert_equals(tftd1.n_vars, tftd2.n_vars)
assert_equals(tftd1.n_clauses, tftd2.n_clauses)
assert_true((tftd1.CL_idxs == tftd2.CL_idxs).all())
assert_true((tftd1.core_var_mask == tftd2.core_var_mask).all())
assert_true((tftd1.core_clause_mask == tftd2.core_clause_mask).all())
|
{
"content_hash": "6f3d46b7dbdde90914ae99cd34dd2eb5",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 136,
"avg_line_length": 45.10294117647059,
"alnum_prop": 0.573850668405608,
"repo_name": "dselsam/neurocore-public",
"id": "870e5adf0381c33d2ad8f551f67992b735f7506b",
"size": "6134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/tftd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "34286"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Python",
"bytes": "73398"
}
],
"symlink_target": ""
}
|
"""Multi-credential file store with lock support.
This module implements a JSON credential store where multiple
credentials can be stored in one file. That file supports locking
both in a single process and across processes.
The credential themselves are keyed off of:
* client_id
* user_agent
* scope
The format of the stored data is like so:
{
'file_version': 1,
'data': [
{
'key': {
'clientId': '<client id>',
'userAgent': '<user agent>',
'scope': '<scope>'
},
'credential': {
# JSON serialized Credentials.
}
}
]
}
"""
__author__ = 'jbeda@google.com (Joe Beda)'
import base64
import fcntl
import logging
import os
import threading
try: # pragma: no cover
import simplejson
except ImportError: # pragma: no cover
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson
except ImportError:
# Should work for Python2.6 and higher.
import json as simplejson
from client import Storage as BaseStorage
from client import Credentials
logger = logging.getLogger(__name__)
# A dict from 'filename'->_MultiStore instances
_multistores = {}
_multistores_lock = threading.Lock()
class Error(Exception):
"""Base error for this module."""
pass
class NewerCredentialStoreError(Error):
"""The credential store is a newer version that supported."""
pass
def get_credential_storage(filename, client_id, user_agent, scope,
warn_on_readonly=True):
"""Get a Storage instance for a credential.
Args:
filename: The JSON file storing a set of credentials
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: string or list of strings, Scope(s) being requested
warn_on_readonly: if True, log a warning if the store is readonly
Returns:
An object derived from client.Storage for getting/setting the
credential.
"""
filename = os.path.realpath(os.path.expanduser(filename))
_multistores_lock.acquire()
try:
multistore = _multistores.setdefault(
filename, _MultiStore(filename, warn_on_readonly))
finally:
_multistores_lock.release()
if type(scope) is list:
scope = ' '.join(scope)
return multistore._get_storage(client_id, user_agent, scope)
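# A minimal usage sketch (the file name and OAuth parameters below are
# placeholders, not values defined in this module):
#
#   storage = get_credential_storage(
#       '~/.my_tool_credentials.json',
#       client_id='example-client-id',
#       user_agent='my-tool/1.0',
#       scope='https://www.googleapis.com/auth/userinfo.email')
#   credentials = storage.get()     # None if nothing is stored for this key yet
#   if credentials is not None:
#       storage.put(credentials)    # re-serializes the credential under lock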
class _MultiStore(object):
"""A file backed store for multiple credentials."""
def __init__(self, filename, warn_on_readonly=True):
"""Initialize the class.
This will create the file if necessary.
"""
self._filename = filename
self._thread_lock = threading.Lock()
self._file_handle = None
self._read_only = False
self._warn_on_readonly = warn_on_readonly
self._create_file_if_needed()
# Cache of deserialized store. This is only valid after the
# _MultiStore is locked or _refresh_data_cache is called. This is
# of the form of:
#
# (client_id, user_agent, scope) -> OAuth2Credential
#
# If this is None, then the store hasn't been read yet.
self._data = None
class _Storage(BaseStorage):
"""A Storage object that knows how to read/write a single credential."""
def __init__(self, multistore, client_id, user_agent, scope):
self._multistore = multistore
self._client_id = client_id
self._user_agent = user_agent
self._scope = scope
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant.
"""
self._multistore._lock()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._multistore._unlock()
def locked_get(self):
"""Retrieve credential.
The Storage lock must be held when this is called.
Returns:
oauth2client.client.Credentials
"""
credential = self._multistore._get_credential(
self._client_id, self._user_agent, self._scope)
if credential:
credential.set_store(self)
return credential
def locked_put(self, credentials):
"""Write a credential.
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
self._multistore._update_credential(credentials, self._scope)
def _create_file_if_needed(self):
"""Create an empty file if necessary.
This method will not initialize the file. Instead it implements a
simple version of "touch" to ensure the file has been created.
"""
if not os.path.exists(self._filename):
old_umask = os.umask(0177)
try:
open(self._filename, 'a+').close()
finally:
os.umask(old_umask)
def _lock(self):
"""Lock the entire multistore."""
self._thread_lock.acquire()
# Check to see if the file is writeable.
if os.access(self._filename, os.W_OK):
self._file_handle = open(self._filename, 'r+')
fcntl.lockf(self._file_handle.fileno(), fcntl.LOCK_EX)
else:
# Cannot open in read/write mode. Open only in read mode.
self._file_handle = open(self._filename, 'r')
self._read_only = True
if self._warn_on_readonly:
logger.warn('The credentials file (%s) is not writable. Opening in '
'read-only mode. Any refreshed credentials will only be '
'valid for this run.' % self._filename)
if os.path.getsize(self._filename) == 0:
logger.debug('Initializing empty multistore file')
# The multistore is empty so write out an empty file.
self._data = {}
self._write()
elif not self._read_only or self._data is None:
# Only refresh the data if we are read/write or we haven't
      # cached the data yet. If we are readonly, we assume it isn't
# changing out from under us and that we only have to read it
# once. This prevents us from whacking any new access keys that
# we have cached in memory but were unable to write out.
self._refresh_data_cache()
def _unlock(self):
"""Release the lock on the multistore."""
if not self._read_only:
fcntl.lockf(self._file_handle.fileno(), fcntl.LOCK_UN)
self._file_handle.close()
self._thread_lock.release()
def _locked_json_read(self):
"""Get the raw content of the multistore file.
The multistore must be locked when this is called.
Returns:
The contents of the multistore decoded as JSON.
"""
assert self._thread_lock.locked()
self._file_handle.seek(0)
return simplejson.load(self._file_handle)
def _locked_json_write(self, data):
"""Write a JSON serializable data structure to the multistore.
The multistore must be locked when this is called.
Args:
data: The data to be serialized and written.
"""
assert self._thread_lock.locked()
if self._read_only:
return
self._file_handle.seek(0)
simplejson.dump(data, self._file_handle, sort_keys=True, indent=2)
self._file_handle.truncate()
def _refresh_data_cache(self):
"""Refresh the contents of the multistore.
The multistore must be locked when this is called.
Raises:
NewerCredentialStoreError: Raised when a newer client has written the
store.
"""
self._data = {}
try:
raw_data = self._locked_json_read()
except Exception:
logger.warn('Credential data store could not be loaded. '
'Will ignore and overwrite.')
return
version = 0
try:
version = raw_data['file_version']
except Exception:
logger.warn('Missing version for credential data store. It may be '
'corrupt or an old version. Overwriting.')
if version > 1:
raise NewerCredentialStoreError(
'Credential file has file_version of %d. '
'Only file_version of 1 is supported.' % version)
credentials = []
try:
credentials = raw_data['data']
except (TypeError, KeyError):
pass
for cred_entry in credentials:
try:
(key, credential) = self._decode_credential_from_json(cred_entry)
self._data[key] = credential
except:
# If something goes wrong loading a credential, just ignore it
logger.info('Error decoding credential, skipping', exc_info=True)
def _decode_credential_from_json(self, cred_entry):
"""Load a credential from our JSON serialization.
Args:
cred_entry: A dict entry from the data member of our format
Returns:
(key, cred) where the key is the key tuple and the cred is the
OAuth2Credential object.
"""
raw_key = cred_entry['key']
client_id = raw_key['clientId']
user_agent = raw_key['userAgent']
scope = raw_key['scope']
key = (client_id, user_agent, scope)
credential = None
credential = Credentials.new_from_json(simplejson.dumps(cred_entry['credential']))
return (key, credential)
def _write(self):
"""Write the cached data back out.
The multistore must be locked.
"""
raw_data = {'file_version': 1}
raw_creds = []
raw_data['data'] = raw_creds
for (cred_key, cred) in self._data.items():
raw_key = {
'clientId': cred_key[0],
'userAgent': cred_key[1],
'scope': cred_key[2]
}
raw_cred = simplejson.loads(cred.to_json())
raw_creds.append({'key': raw_key, 'credential': raw_cred})
self._locked_json_write(raw_data)
def _get_credential(self, client_id, user_agent, scope):
"""Get a credential from the multistore.
The multistore must be locked.
Args:
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: A string for the scope(s) being requested
Returns:
The credential specified or None if not present
"""
key = (client_id, user_agent, scope)
return self._data.get(key, None)
def _update_credential(self, cred, scope):
"""Update a credential and write the multistore.
This must be called when the multistore is locked.
Args:
cred: The OAuth2Credential to update/set
scope: The scope(s) that this credential covers
"""
key = (cred.client_id, cred.user_agent, scope)
self._data[key] = cred
self._write()
def _get_storage(self, client_id, user_agent, scope):
"""Get a Storage object to get/set a credential.
This Storage is a 'view' into the multistore.
Args:
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: A string for the scope(s) being requested
Returns:
A Storage object that can be used to get/set this cred
"""
return self._Storage(self, client_id, user_agent, scope)
|
{
"content_hash": "3d561c0caef865372aaa454160a5c862",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 86,
"avg_line_length": 29.671232876712327,
"alnum_prop": 0.6478301015697138,
"repo_name": "aadityabhatia/testzone2011-ScatterGraph",
"id": "cf43dd9bc860e65afde76c8282ea912a859dc6d6",
"size": "10881",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "oauth2client/multistore_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "154034"
}
],
"symlink_target": ""
}
|
import copy
import logging
import os
import re
import reportlab
from reportlab.lib.enums import TA_LEFT
from reportlab.lib.fonts import addMapping
from reportlab.lib.pagesizes import landscape, A4
from reportlab.lib.styles import ParagraphStyle
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus.frames import Frame, ShowBoundaryValue
from reportlab.platypus.paraparser import ParaFrag, ps2tt, tt2ps
import six
import xhtml2pdf.default
import xhtml2pdf.parser
from xhtml2pdf.util import getSize, getCoords, getFile, pisaFileObject, \
getFrameDimensions, getColor, set_value, copy_attrs
from xhtml2pdf.w3c import css
from xhtml2pdf.xhtml2pdf_reportlab import PmlPageTemplate, PmlTableOfContents, \
PmlParagraph, PmlParagraphAndImage, PmlPageCount
TupleType = tuple
ListType = list
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
reportlab.rl_config.warnOnMissingFontGlyphs = 0
log = logging.getLogger("xhtml2pdf")
sizeDelta = 2 # amount to reduce font size by for super and sub script
subFraction = 0.4 # fraction of font size that a sub script should be lowered
superFraction = 0.4
NBSP = u"\u00a0"
def clone(self, **kwargs):
n = ParaFrag(**self.__dict__)
if kwargs:
d = n.__dict__
d.update(kwargs)
# This else could cause trouble in Paragraphs with images etc.
if "cbDefn" in d:
del d["cbDefn"]
n.bulletText = None
return n
ParaFrag.clone = clone
def getParaFrag(style):
frag = ParaFrag()
set_value(frag,
('sub', 'super', 'rise', 'underline', 'strike', 'greek',
'leading', 'leadingSpace', 'spaceBefore',
'spaceAfter', 'leftIndent', 'rightIndent', 'firstLineIndent',
'borderPadding', 'paddingLeft', 'paddingRight',
'paddingTop', 'paddingBottom', 'bulletIndent',
'insideStaticFrame', 'outlineLevel'
),
0)
set_value(frag,
('backColor', 'vAlign', 'link', 'borderStyle',
'borderColor', 'listStyleType', 'listStyleImage',
'wordWrap', 'height', 'width', 'bulletText'
),
None
)
set_value(frag,
('pageNumber', 'pageCount', 'outline',
'outlineOpen', 'keepWithNext'),
False)
frag.text = ""
frag.fontName = "Times-Roman"
frag.fontName, frag.bold, frag.italic = ps2tt(style.fontName)
frag.fontSize = style.fontSize
frag.textColor = style.textColor
# Extras
frag.letterSpacing = "normal"
frag.leadingSource = "150%"
frag.alignment = TA_LEFT
frag.borderWidth = 1
frag.borderLeftWidth = frag.borderWidth
frag.borderLeftColor = frag.borderColor
frag.borderLeftStyle = frag.borderStyle
frag.borderRightWidth = frag.borderWidth
frag.borderRightColor = frag.borderColor
frag.borderRightStyle = frag.borderStyle
frag.borderTopWidth = frag.borderWidth
frag.borderTopColor = frag.borderColor
frag.borderTopStyle = frag.borderStyle
frag.borderBottomWidth = frag.borderWidth
frag.borderBottomColor = frag.borderColor
frag.borderBottomStyle = frag.borderStyle
frag.whiteSpace = "normal"
frag.bulletFontName = "Helvetica"
frag.zoom = 1.0
return frag
def getDirName(path):
parts = urlparse.urlparse(path)
if parts.scheme:
return path
else:
return os.path.dirname(os.path.abspath(path))
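# --- Added illustration (hedged; follows directly from the code above) ---
# getDirName() leaves paths that already carry a URL scheme untouched and
# otherwise resolves the containing directory, e.g.:
#
#     getDirName("http://example.com/css/site.css")
#         -> "http://example.com/css/site.css"
#     getDirName("templates/invoice.html")
#         -> os.path.dirname(os.path.abspath("templates/invoice.html"))
#
# The concrete paths are made up for the example.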
class pisaCSSBuilder(css.CSSBuilder):
def atFontFace(self, declarations):
"""
Embed fonts
"""
result = self.ruleset([self.selector('*')], declarations)
data = list(result[0].values())[0]
if "src" not in data:
# invalid - source is required, ignore this specification
return {}, {}
names = data["font-family"]
# Font weight
fweight = str(data.get("font-weight", "normal")).lower()
bold = fweight in ("bold", "bolder", "500", "600", "700", "800", "900")
if not bold and fweight != "normal":
log.warn(
self.c.warning("@fontface, unknown value font-weight '%s'", fweight))
# Font style
italic = str(
data.get("font-style", "")).lower() in ("italic", "oblique")
# The "src" attribute can be a CSS group but in that case
# ignore everything except the font URI
uri = data['src']
if not isinstance(data['src'], str):
for part in uri:
if isinstance(part, str):
uri = part
break
src = self.c.getFile(uri, relative=self.c.cssParser.rootPath)
self.c.loadFont(
names,
src,
bold=bold,
italic=italic)
return {}, {}
def _pisaAddFrame(self, name, data, first=False, border=None, size=(0, 0)):
c = self.c
if not name:
name = "-pdf-frame-%d" % c.UID()
if data.get('is_landscape', False):
size = (size[1], size[0])
x, y, w, h = getFrameDimensions(data, size[0], size[1])
# print name, x, y, w, h
# if not (w and h):
# return None
if first:
return name, None, data.get("-pdf-frame-border", border), x, y, w, h, data
return (name, data.get("-pdf-frame-content", None),
data.get("-pdf-frame-border", border), x, y, w, h, data)
def _getFromData(self, data, attr, default=None, func=None):
if not func:
func = lambda x: x
if type(attr) in (list, tuple):
for a in attr:
if a in data:
return func(data[a])
return default
else:
if attr in data:
return func(data[attr])
return default
def atPage(self, name, pseudopage, declarations):
c = self.c
data = {}
name = name or "body"
pageBorder = None
if declarations:
result = self.ruleset([self.selector('*')], declarations)
if declarations:
try:
data = result[0].values()[0]
except Exception:
data = result[0].popitem()[1]
pageBorder = data.get("-pdf-frame-border", None)
if name in c.templateList:
log.warn(
self.c.warning("template '%s' has already been defined", name))
if "-pdf-page-size" in data:
c.pageSize = xhtml2pdf.default.PML_PAGESIZES.get(
str(data["-pdf-page-size"]).lower(), c.pageSize)
isLandscape = False
if "size" in data:
size = data["size"]
if type(size) is not ListType:
size = [size]
sizeList = []
for value in size:
valueStr = str(value).lower()
if type(value) is TupleType:
sizeList.append(getSize(value))
elif valueStr == "landscape":
isLandscape = True
elif valueStr == "portrait":
isLandscape = False
elif valueStr in xhtml2pdf.default.PML_PAGESIZES:
c.pageSize = xhtml2pdf.default.PML_PAGESIZES[valueStr]
else:
raise RuntimeError("Unknown size value for @page")
if len(sizeList) == 2:
c.pageSize = tuple(sizeList)
if isLandscape:
c.pageSize = landscape(c.pageSize)
padding_top = self._getFromData(data, 'padding-top', 0, getSize)
padding_left = self._getFromData(data, 'padding-left', 0, getSize)
padding_right = self._getFromData(data, 'padding-right', 0, getSize)
padding_bottom = self._getFromData(data, 'padding-bottom', 0, getSize)
border_color = self._getFromData(data, ('border-top-color', 'border-bottom-color',
'border-left-color', 'border-right-color'), None, getColor)
border_width = self._getFromData(data, ('border-top-width', 'border-bottom-width',
'border-left-width', 'border-right-width'), 0, getSize)
for prop in ("margin-top", "margin-left", "margin-right", "margin-bottom",
"top", "left", "right", "bottom", "width", "height"):
if prop in data:
c.frameList.append(
self._pisaAddFrame(name, data, first=True, border=pageBorder, size=c.pageSize))
break
# Frames have to be calculated after we know the pagesize
frameList = []
staticList = []
for fname, static, border, x, y, w, h, fdata in c.frameList:
fpadding_top = self._getFromData(
fdata, 'padding-top', padding_top, getSize)
fpadding_left = self._getFromData(
fdata, 'padding-left', padding_left, getSize)
fpadding_right = self._getFromData(
fdata, 'padding-right', padding_right, getSize)
fpadding_bottom = self._getFromData(
fdata, 'padding-bottom', padding_bottom, getSize)
fborder_color = self._getFromData(fdata, ('border-top-color', 'border-bottom-color',
'border-left-color', 'border-right-color'), border_color, getColor)
fborder_width = self._getFromData(fdata, ('border-top-width', 'border-bottom-width',
'border-left-width', 'border-right-width'), border_width, getSize)
if border or pageBorder:
frame_border = ShowBoundaryValue()
else:
frame_border = ShowBoundaryValue(
color=fborder_color, width=fborder_width)
# fix frame sizing problem.
if static:
x, y, w, h = getFrameDimensions(
fdata, c.pageSize[0], c.pageSize[1])
x, y, w, h = getCoords(x, y, w, h, c.pageSize)
if w <= 0 or h <= 0:
log.warn(
self.c.warning("Negative width or height of frame. Check @frame definitions."))
frame = Frame(
x, y, w, h,
id=fname,
leftPadding=fpadding_left,
rightPadding=fpadding_right,
bottomPadding=fpadding_bottom,
topPadding=fpadding_top,
showBoundary=frame_border)
if static:
frame.pisaStaticStory = []
c.frameStatic[static] = [frame] + c.frameStatic.get(static, [])
staticList.append(frame)
else:
frameList.append(frame)
background = data.get("background-image", None)
if background:
# should be relative to the css file
background = self.c.getFile(
background, relative=self.c.cssParser.rootPath)
if not frameList:
log.warn(
c.warning("missing explicit frame definition for content or just static frames"))
fname, static, border, x, y, w, h, data = self._pisaAddFrame(name, data, first=True, border=pageBorder,
size=c.pageSize)
x, y, w, h = getCoords(x, y, w, h, c.pageSize)
if w <= 0 or h <= 0:
log.warn(
c.warning("Negative width or height of frame. Check @page definitions."))
if border or pageBorder:
frame_border = ShowBoundaryValue()
else:
frame_border = ShowBoundaryValue(
color=border_color, width=border_width)
frameList.append(Frame(
x, y, w, h,
id=fname,
leftPadding=padding_left,
rightPadding=padding_right,
bottomPadding=padding_bottom,
topPadding=padding_top,
showBoundary=frame_border))
pt = PmlPageTemplate(
id=name,
frames=frameList,
pagesize=c.pageSize,
)
pt.pisaStaticList = staticList
pt.pisaBackground = background
pt.pisaBackgroundList = c.pisaBackgroundList
if isLandscape:
pt.pageorientation = pt.LANDSCAPE
c.templateList[name] = pt
c.template = None
c.frameList = []
c.frameStaticList = []
return {}, {}
def atFrame(self, name, declarations):
if declarations:
result = self.ruleset([self.selector('*')], declarations)
# print "@BOX", name, declarations, result
data = result[0]
if data:
try:
data = data.values()[0]
except Exception:
data = data.popitem()[1]
self.c.frameList.append(
self._pisaAddFrame(name, data, size=self.c.pageSize))
return {}, {} # TODO: It always returns empty dicts?
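# --- Added illustration (hedged; not part of the original source) ---
# atPage()/atFrame() above consume pisa-specific CSS at-rules.  A minimal
# stylesheet that would exercise them might look like the following; the
# frame name and measurements are only an example, and the supported keys are
# exactly the ones read by the handlers above (-pdf-frame-content,
# -pdf-page-size, size, margin-*, ...):
#
#     @page {
#         size: a4 landscape;
#         margin: 2cm;
#         @frame footer_frame {
#             -pdf-frame-content: footer_content;
#             bottom: 1cm;
#             height: 1cm;
#         }
#     }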
class pisaCSSParser(css.CSSParser):
def parseExternal(self, cssResourceName):
result=None
oldRootPath = self.rootPath
cssFile = self.c.getFile(cssResourceName, relative=self.rootPath)
if not cssFile:
return None
if self.rootPath and urlparse.urlparse(self.rootPath).scheme:
self.rootPath = urlparse.urljoin(self.rootPath, cssResourceName)
else:
self.rootPath = getDirName(cssFile.uri)
try:
result = self.parse(cssFile.getData())
self.rootPath = oldRootPath
except Exception as e:
print(e)
return result
class pisaContext(object):
"""
Helper class for creation of reportlab story and container for
various data.
"""
def __init__(self, path, debug=0, capacity=-1):
self.fontList = copy.copy(xhtml2pdf.default.DEFAULT_FONT)
set_value(self,
('path', 'story', 'text', 'log', 'frameStaticList',
'pisaBackgroundList', 'frameList', 'anchorFrag',
'anchorName', 'fragList', 'fragAnchor', 'fragStack'
), [], _copy=True)
set_value(self, ('node', 'indexing_story',
'template', 'keepInFrameIndex',
'tableData', 'image'),
None)
set_value(self, ('err', 'warn', 'uidctr', 'listCounter'), 0)
set_value(self, ('text', 'cssText', 'cssDefaultText'), "")
set_value(self, ('templateList', 'frameStatic', 'imageData'),
{}, _copy=True)
self.capacity = capacity
self.toc = PmlTableOfContents()
self.multiBuild = False
self.pageSize = A4
self.baseFontSize = getSize("12pt")
self.frag = self.fragBlock = getParaFrag(
ParagraphStyle('default%d' % self.UID()))
self.fragStrip = True
self.force = False
# External callback function for path calculations
self.pathCallback = None
# Store path to document
self.pathDocument = path or "__dummy__"
parts = urlparse.urlparse(self.pathDocument)
if not parts.scheme:
self.pathDocument = os.path.abspath(self.pathDocument)
self.pathDirectory = getDirName(self.pathDocument)
self.meta = dict(
author="",
title="",
subject="",
keywords="",
pagesize=A4,
)
def UID(self):
self.uidctr += 1
return self.uidctr
# METHODS FOR CSS
def addCSS(self, value):
value = value.strip()
if value.startswith("<![CDATA["):
value = value[9: - 3]
if value.startswith("<!--"):
value = value[4: - 3]
self.cssText += value.strip() + "\n"
# METHODS FOR CSS
def addDefaultCSS(self, value):
value = value.strip()
if value.startswith("<![CDATA["):
value = value[9: - 3]
if value.startswith("<!--"):
value = value[4: - 3]
self.cssDefaultText += value.strip() + "\n"
def parseCSS(self):
# This self-reference really should be refactored. But for now
# we'll settle for using weak references. This avoids memory
# leaks because the garbage collector (at least on cPython
# 2.7.3) isn't aggressive enough.
import weakref
self.cssBuilder = pisaCSSBuilder(mediumSet=["all", "print", "pdf"])
#self.cssBuilder.c = self
self.cssBuilder._c = weakref.ref(self)
pisaCSSBuilder.c = property(lambda self: self._c())
self.cssParser = pisaCSSParser(self.cssBuilder)
self.cssParser.rootPath = self.pathDirectory
#self.cssParser.c = self
self.cssParser._c = weakref.ref(self)
pisaCSSParser.c = property(lambda self: self._c())
self.css = self.cssParser.parse(self.cssText)
self.cssDefault = self.cssParser.parse(self.cssDefaultText)
self.cssCascade = css.CSSCascadeStrategy(
userAgent=self.cssDefault, user=self.css)
self.cssCascade.parser = self.cssParser
# METHODS FOR STORY
def addStory(self, data):
self.story.append(data)
def swapStory(self, story=None):
story = story if story is not None else []
self.story, story = copy.copy(story), copy.copy(self.story)
return story
def toParagraphStyle(self, first):
style = ParagraphStyle(
'default%d' % self.UID(), keepWithNext=first.keepWithNext)
copy_attrs(style, first,
('fontName', 'fontSize', 'letterSpacing', 'backColor',
'spaceBefore', 'spaceAfter', 'leftIndent', 'rightIndent',
'firstLineIndent', 'textColor', 'alignment',
'bulletIndent', 'wordWrap', 'borderTopStyle',
'borderTopWidth', 'borderTopColor',
'borderBottomStyle', 'borderBottomWidth',
'borderBottomColor', 'borderLeftStyle',
'borderLeftWidth', 'borderLeftColor',
'borderRightStyle', 'borderRightWidth',
'borderRightColor', 'paddingTop', 'paddingBottom',
'paddingLeft', 'paddingRight', 'borderPadding'
)
)
style.leading = max(
first.leading + first.leadingSpace, first.fontSize * 1.25)
style.bulletFontName = first.bulletFontName or first.fontName
style.bulletFontSize = first.fontSize
# Border handling for Paragraph
# Transfer the styles for each side of the border, *not* the whole
# border values that reportlab supports. We'll draw them ourselves in
# PmlParagraph.
# If no border color is given, the text color is used (XXX Tables!)
if (style.borderTopColor is None) and style.borderTopWidth:
style.borderTopColor = first.textColor
if (style.borderBottomColor is None) and style.borderBottomWidth:
style.borderBottomColor = first.textColor
if (style.borderLeftColor is None) and style.borderLeftWidth:
style.borderLeftColor = first.textColor
if (style.borderRightColor is None) and style.borderRightWidth:
style.borderRightColor = first.textColor
style.fontName = tt2ps(first.fontName, first.bold, first.italic)
return style
def addTOC(self):
styles = []
for i in six.moves.range(20):
self.node.attributes["class"] = "pdftoclevel%d" % i
self.cssAttr = xhtml2pdf.parser.CSSCollect(self.node, self)
xhtml2pdf.parser.CSS2Frag(self, {
"margin-top": 0,
"margin-bottom": 0,
"margin-left": 0,
"margin-right": 0,
}, True)
pstyle = self.toParagraphStyle(self.frag)
styles.append(pstyle)
self.toc.levelStyles = styles
self.addStory(self.toc)
self.indexing_story = None
def addPageCount(self):
if not self.multiBuild:
self.indexing_story = PmlPageCount()
self.multiBuild = True
def dumpPara(self, frags, style):
return
def addPara(self, force=False):
force = (force or self.force)
self.force = False
# Cleanup the trail
rfragList = reversed(self.fragList)
# Find maximum lead
maxLeading = 0
#fontSize = 0
for frag in self.fragList:
leading = getSize(
frag.leadingSource, frag.fontSize) + frag.leadingSpace
maxLeading = max(
leading, frag.fontSize + frag.leadingSpace, maxLeading)
frag.leading = leading
if force or (self.text.strip() and self.fragList):
# Update paragraph style by style of first fragment
first = self.fragBlock
style = self.toParagraphStyle(first)
# style.leading = first.leading + first.leadingSpace
if first.leadingSpace:
style.leading = maxLeading
else:
style.leading = getSize(
first.leadingSource, first.fontSize) + first.leadingSpace
bulletText = copy.copy(first.bulletText)
first.bulletText = None
# Add paragraph to story
if force or len(self.fragAnchor + self.fragList) > 0:
# We need this empty fragment to work around problems in
# Reportlab paragraphs regarding backGround etc.
if self.fragList:
self.fragList.append(self.fragList[- 1].clone(text=''))
else:
blank = self.frag.clone()
blank.fontName = "Helvetica"
blank.text = ''
self.fragList.append(blank)
self.dumpPara(self.fragAnchor + self.fragList, style)
para = PmlParagraph(
self.text,
style,
frags=self.fragAnchor + self.fragList,
bulletText=bulletText)
para.outline = first.outline
para.outlineLevel = first.outlineLevel
para.outlineOpen = first.outlineOpen
para.keepWithNext = first.keepWithNext
para.autoLeading = "max"
if self.image:
para = PmlParagraphAndImage(
para,
self.image,
side=self.imageData.get("align", "left"))
self.addStory(para)
self.fragAnchor = []
first.bulletText = None
# Reset data
self.image = None
self.imageData = {}
self.clearFrag()
# METHODS FOR FRAG
def clearFrag(self):
self.fragList = []
self.fragStrip = True
self.text = u""
def copyFrag(self, **kw):
return self.frag.clone(**kw)
def newFrag(self, **kw):
self.frag = self.frag.clone(**kw)
return self.frag
def _appendFrag(self, frag):
if frag.link and frag.link.startswith("#"):
self.anchorFrag.append((frag, frag.link[1:]))
self.fragList.append(frag)
# XXX Argument frag is useless!
def addFrag(self, text="", frag=None):
frag = baseFrag = self.frag.clone()
# if sub and super are both on they will cancel each other out
if frag.sub == 1 and frag.super == 1:
frag.sub = 0
frag.super = 0
# XXX Has to be replaced by CSS styles like vertical-align and
# font-size
if frag.sub:
frag.rise = - frag.fontSize * subFraction
frag.fontSize = max(frag.fontSize - sizeDelta, 3)
elif frag.super:
frag.rise = frag.fontSize * superFraction
frag.fontSize = max(frag.fontSize - sizeDelta, 3)
# bold, italic, and underline
frag.fontName = frag.bulletFontName = tt2ps(
frag.fontName, frag.bold, frag.italic)
# Replace soft hyphens (\xad) with nothing and normalize the different
# non-breaking space encodings to NBSP
text = (text
.replace(u"\xad", u"")
.replace(u"\xc2\xa0", NBSP)
.replace(u"\xa0", NBSP))
if frag.whiteSpace == "pre":
# Handle by lines
for text in re.split(r'(\r\n|\n|\r)', text):
# This is an exceptionally expensive piece of code
self.text += text
if ("\n" in text) or ("\r" in text):
# If EOL insert a linebreak
frag = baseFrag.clone()
frag.text = ""
frag.lineBreak = 1
self._appendFrag(frag)
else:
# Handle tabs in a simple way
text = text.replace(u"\t", 8 * u" ")
# Somehow for Reportlab NBSP have to be inserted
# as single character fragments
for text in re.split(r'(\ )', text):
frag = baseFrag.clone()
if text == " ":
text = NBSP
frag.text = text
self._appendFrag(frag)
else:
for text in re.split(u'(' + NBSP + u')', text):
frag = baseFrag.clone()
if text == NBSP:
self.force = True
frag.text = NBSP
self.text += text
self._appendFrag(frag)
else:
frag.text = " ".join(("x" + text + "x").split())[1: - 1]
if self.fragStrip:
frag.text = frag.text.lstrip()
if frag.text:
self.fragStrip = False
self.text += frag.text
self._appendFrag(frag)
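# --- Added usage sketch (hedged; real callers go through xhtml2pdf.parser) ---
# The fragment methods here are normally driven by the HTML parser, but the
# intended call order is roughly:
#
#     c = pisaContext("document.html")
#     c.addFrag("Hello ")        # queue text with the current style
#     c.newFrag(bold=1)          # switch the active fragment style
#     c.addFrag("world")
#     c.addPara()                # flush queued fragments into a PmlParagraph
#
# This is only a sketch of the flow, not a supported public API.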
def pushFrag(self):
self.fragStack.append(self.frag)
self.newFrag()
def pullFrag(self):
self.frag = self.fragStack.pop()
# XXX
def _getFragment(self, l=20):
try:
return repr(" ".join(self.node.toxml().split()[:l]))
except:
return ""
def _getLineNumber(self):
return 0
def context(self, msg):
return "%s\n%s" % (
str(msg),
self._getFragment(50))
def warning(self, msg, *args):
self.warn += 1
self.log.append((xhtml2pdf.default.PML_WARNING, self._getLineNumber(), str(
msg), self._getFragment(50)))
try:
return self.context(msg % args)
except:
return self.context(msg)
def error(self, msg, *args):
self.err += 1
self.log.append((xhtml2pdf.default.PML_ERROR, self._getLineNumber(), str(
msg), self._getFragment(50)))
try:
return self.context(msg % args)
except:
return self.context(msg)
# UTILS
def _getFileDeprecated(self, name, relative):
try:
path = relative or self.pathDirectory
if name.startswith("data:"):
return name
if self.pathCallback is not None:
nv = self.pathCallback(name, relative)
else:
if path is None:
log.warn(
"Could not find main directory for getting filename. Use CWD")
path = os.getcwd()
nv = os.path.normpath(os.path.join(path, name))
if not (nv and os.path.isfile(nv)):
nv = None
if nv is None:
log.warn(self.warning("File '%s' does not exist", name))
return nv
except:
log.warn(
self.warning("getFile %r %r %r", name, relative, path), exc_info=1)
def getFile(self, name, relative=None):
"""
Returns a file name or None
"""
if self.pathCallback is not None:
return getFile(self._getFileDeprecated(name, relative))
return getFile(name, relative or self.pathDirectory)
def getFontName(self, names, default="helvetica"):
"""
Name of a font
"""
# print names, self.fontList
if type(names) is not ListType:
if type(names) not in six.string_types:
names = str(names)
names = names.strip().split(",")
for name in names:
if type(name) not in six.string_types:
name = str(name)
font = self.fontList.get(name.strip().lower(), None)
if font is not None:
return font
return self.fontList.get(default, None)
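# --- Added illustration (hedged) ---
# getFontName() walks a comma separated font-family list and falls back to
# the default face, e.g. with the stock font list:
#
#     c.getFontName("Arial, Helvetica")  # first name found in c.fontList wins
#     c.getFontName("NoSuchFont")        # -> whatever "helvetica" maps to
#
# The exact return values depend on xhtml2pdf.default.DEFAULT_FONT and on
# any fonts registered at runtime.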
def registerFont(self, fontname, alias=None):
alias = alias if alias is not None else []
self.fontList[str(fontname).lower()] = str(fontname)
for a in alias:
if type(fontname) not in six.string_types:
fontname = str(fontname)
self.fontList[str(a)] = fontname
def loadFont(self, names, src, encoding="WinAnsiEncoding", bold=0, italic=0):
# XXX Just works for local filenames!
if names and src:
file = src
src = file.uri
log.debug("Load font %r", src)
if type(names) is ListType:
fontAlias = names
else:
fontAlias = (x.lower().strip() for x in names.split(",") if x)
# XXX Problems with unicode here
fontAlias = [str(x) for x in fontAlias]
fontName = fontAlias[0]
parts = src.split(".")
baseName, suffix = ".".join(parts[: - 1]), parts[- 1]
suffix = suffix.lower()
if suffix in ["ttc", "ttf"]:
# determine full font name according to weight and style
fullFontName = "%s_%d%d" % (fontName, bold, italic)
# check if font has already been registered
if fullFontName in self.fontList:
log.warn(
self.warning("Repeated font embed for %s, skip new embed ", fullFontName))
else:
# Register TTF font and special name
filename = file.getNamedFile()
pdfmetrics.registerFont(TTFont(fullFontName, filename))
# Add or replace missing styles
for bold in (0, 1):
for italic in (0, 1):
if ("%s_%d%d" % (fontName, bold, italic)) not in self.fontList:
addMapping(
fontName, bold, italic, fullFontName)
# Register "normal" name and the placeholder for style
self.registerFont(fontName, fontAlias + [fullFontName])
elif suffix in ("afm", "pfb"):
if suffix == "afm":
afm = file.getNamedFile()
tfile = pisaFileObject(baseName + ".pfb")
pfb = tfile.getNamedFile()
else:
pfb = file.getNamedFile()
tfile = pisaFileObject(baseName + ".afm")
afm = tfile.getNamedFile()
# determine full font name according to weight and style
fullFontName = "%s_%d%d" % (fontName, bold, italic)
# check if font has already been registered
if fullFontName in self.fontList:
log.warn(
self.warning("Repeated font embed for %s, skip new embed", fontName))
else:
# Include font
face = pdfmetrics.EmbeddedType1Face(afm, pfb)
fontNameOriginal = face.name
pdfmetrics.registerTypeFace(face)
# print fontName, fontNameOriginal, fullFontName
justFont = pdfmetrics.Font(
fullFontName, fontNameOriginal, encoding)
pdfmetrics.registerFont(justFont)
# Add or replace missing styles
for bold in (0, 1):
for italic in (0, 1):
if ("%s_%d%d" % (fontName, bold, italic)) not in self.fontList:
addMapping(
fontName, bold, italic, fontNameOriginal)
# Register "normal" name and the placeholder for style
self.registerFont(
fontName, fontAlias + [fullFontName, fontNameOriginal])
else:
log.warning(self.warning("wrong attributes for <pdf:font>"))
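# --- Added usage sketch (hedged; not part of the original module) ---
# loadFont() is usually reached through an @font-face rule (see
# pisaCSSBuilder.atFontFace above), but calling it directly would look
# roughly like this, assuming "DejaVuSans.ttf" exists next to the document:
#
#     c = pisaContext("document.html")
#     c.loadFont("DejaVu, sans-serif", c.getFile("DejaVuSans.ttf"))
#     c.getFontName("DejaVu")   # -> "dejavu" (the registered family name)
#
# The font file name and the derived face name are illustrative.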
|
{
"content_hash": "cad34b35e178bd63313d956968174681",
"timestamp": "",
"source": "github",
"line_count": 932,
"max_line_length": 121,
"avg_line_length": 36.26609442060086,
"alnum_prop": 0.5387869822485207,
"repo_name": "orbitvu/xhtml2pdf",
"id": "b8e6a56e608423d3e0dcf0daeb289240026fcb39",
"size": "33824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xhtml2pdf/context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22999"
},
{
"name": "HTML",
"bytes": "468535"
},
{
"name": "Makefile",
"bytes": "1032"
},
{
"name": "Python",
"bytes": "470838"
}
],
"symlink_target": ""
}
|
"""This code example gets all image creatives. The statement retrieves up to the
maximum page size limit of 500. To create an image creative,
run create_creatives.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
creative_service = client.GetService('CreativeService', version='v201208')
# Create statement object to only select image creatives.
values = [{
'key': 'creativeType',
'value': {
'xsi_type': 'TextValue',
'value': 'ImageCreative'
}
}]
filter_statement = {'query': 'WHERE creativeType = :creativeType LIMIT 500',
'values': values}
# Get creatives by statement.
response = creative_service.GetCreativesByStatement(filter_statement)[0]
creatives = []
if 'results' in response:
creatives = response['results']
# Display results.
for creative in creatives:
print ('Creative with id \'%s\', name \'%s\', and type \'%s\' was found.'
% (creative['id'], creative['name'], creative['Creative_Type']))
print
print 'Number of results found: %s' % len(creatives)
|
{
"content_hash": "b2fa07b9220b42a25a480130b9c4f732",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 31.58695652173913,
"alnum_prop": 0.6799724707501721,
"repo_name": "lociii/googleads-python-lib",
"id": "f9aa040c70e85df33de8113be5ae3d78207b4970",
"size": "2071",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201208/get_creatives_by_statement.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3481618"
},
{
"name": "Shell",
"bytes": "603"
}
],
"symlink_target": ""
}
|
import time
from cumulusci.robotframework.pageobjects import BasePage
from cumulusci.robotframework.pageobjects import pageobject
from BaseObjects import BaseNPSPPage
from cumulusci.robotframework.utils import capture_screenshot_on_error
from selenium.webdriver.common.keys import Keys
from NPSP import npsp_lex_locators
from logging import exception
@pageobject("Custom", "ManageHousehold")
class ManageHouseholdPage(BaseNPSPPage, BasePage):
def _is_current_page(self):
"""
Waits for the current page to be a Manage Household page
"""
self.selenium.wait_until_location_contains("/one",timeout=60, message="Manage Household page did not load in 1 min")
self.npsp.wait_for_locator("frame","Manage Household")
def _go_to_page(self, filter_name=None):
"""
Verifies that current page is Manage Household
"""
self.selenium.wait_until_location_contains("/one",timeout=60, message="Manage Household page did not load in 1 min")
self.npsp.wait_for_locator("frame","Manage Household")
def change_address_using(self, option, **kwargs):
"""
Changes the address setting based on the chosen option
The only supported option is (Enter a new address)
"""
if option.lower() == "enter a new address":
locator=npsp_lex_locators['button-title'].format("Enter a new address")
self.selenium.click_button(locator)
self.npsp.populate_modal_form(**kwargs)
self.selenium.click_button("Set Address")
self.selenium.click_button("Save")
self.selenium.unselect_frame()
@capture_screenshot_on_error
def add_contact(self, option, value):
"""
Looks up the contact passed as a parameter and adds it to the household based on the given option
Supported options are (New/Existing)
"""
self.npsp.choose_frame("Manage Household")
xpath = npsp_lex_locators['manage_hh_page']['lookup'].format("Find a Contact or add a new Contact to the Household.")
field = self.selenium.get_webelement(xpath)
self.selenium.clear_element_text(field)
field.send_keys(value)
time.sleep(3)
field.send_keys(Keys.ENTER)
lookup_ele=npsp_lex_locators['household_lookup_dropdown_menu']
self.selenium.wait_until_element_is_visible(lookup_ele)
if option.lower() in ("new", "existing"):
new_contact_locator=npsp_lex_locators['manage_hh_page']['add_contact_option'].format("New Contact")
new_contact_btn=npsp_lex_locators['button-title'].format("New Contact")
self.selenium.wait_until_element_is_visible(new_contact_locator)
self.selenium.click_element(new_contact_locator)
self.selenium.wait_until_element_is_visible(new_contact_btn)
self.selenium.click_button(new_contact_btn)
else:
print("Invalid option")
self.selenium.wait_until_element_is_not_visible(lookup_ele)
self.selenium.click_button("Save")
self.selenium.unselect_frame()
def validate_and_select_checkbox(self,contact,option):
"""
Selects the name display setting option for each contact in the household
Supported options are (Household Name/Formal Greeting/Informal Greeting)
"""
self.npsp.choose_frame("Manage Household")
loc=self.npsp.validate_checkboxes(contact,option)
self.selenium.double_click_element(loc)
self.selenium.unselect_frame()
def save_changes_made_for_manage_household(self):
self.npsp.choose_frame("Manage Household")
self.selenium.click_button("Save")
self.selenium.unselect_frame()
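# --- Added usage sketch (hedged; keyword names depend on CumulusCI's ---
# --- page-object keyword generation) ---
# Inside a Robot Framework test these methods surface as keywords, roughly:
#
#     Go To Page                                Custom    ManageHousehold
#     Add Contact                               New       Julia Smith
#     Validate And Select Checkbox              Julia Smith    Household Name
#     Save Changes Made For Manage Household
#
# The contact name and checkbox option above are illustrative values.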
|
{
"content_hash": "ad7471d320e03d022001d9af39bc31be",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 125,
"avg_line_length": 43.2183908045977,
"alnum_prop": 0.6734042553191489,
"repo_name": "SalesforceFoundation/Cumulus",
"id": "aa5a0eaf79593974d4b2afb095af242e2fd6bbfc",
"size": "3760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robot/Cumulus/resources/ManageHouseHoldPageObject.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Apex",
"bytes": "8609134"
},
{
"name": "CSS",
"bytes": "31382"
},
{
"name": "HTML",
"bytes": "326875"
},
{
"name": "JavaScript",
"bytes": "584718"
},
{
"name": "ObjectScript",
"bytes": "2663"
},
{
"name": "Python",
"bytes": "131423"
},
{
"name": "RobotFramework",
"bytes": "195022"
},
{
"name": "Shell",
"bytes": "2579"
},
{
"name": "TSQL",
"bytes": "918118"
}
],
"symlink_target": ""
}
|
import pytest
@pytest.mark.xfail
def test_delete_bookmark():
raise NotImplementedError
|
{
"content_hash": "0371026e2b5d4d18ad6ce0a19eb01ec3",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 29,
"avg_line_length": 15.5,
"alnum_prop": 0.7741935483870968,
"repo_name": "globus/globus-sdk-python",
"id": "2a01c948a13a2c756327c8d6098314788df7229a",
"size": "93",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/functional/services/transfer/test_delete_bookmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "303"
},
{
"name": "Makefile",
"bytes": "810"
},
{
"name": "Python",
"bytes": "896256"
},
{
"name": "Shell",
"bytes": "125"
}
],
"symlink_target": ""
}
|
from SCons.Script import *
import os
import re
import string
import datetime
# -----------------------------------------------------------------------------
class FunctionScanner:
# FIXME correct handling of strings and comments
commentFilter = re.compile(r"/\*.*?\*/", re.DOTALL)
# find any function 'void test*();'
functionFilter = re.compile(r"void\s+(test[A-Z]\w*)\s*\([\svoid]*\)\s*;")
def __init__(self, filename):
self.text = open(filename).read()
def getFunctions(self):
self._stripCommentsAndStrings()
functions = self.functionFilter.findall(self.text)
return functions
def _stripCommentsAndStrings(self):
self._prepareString()
self._stripComments()
lines = self.text.splitlines()
for i, line in enumerate(lines):
lines[i] = self._stripCppComments(line)
self.text = '\n'.join(lines)
def _prepareString(self):
self.text = self.text.replace('\r\n', '\n')
self.text = self.text.replace('\\\n', '')
def _stripCppComments(self, line):
index = line.find(r"//")
if index >= 0:
return line[:index]
return line
def _stripComments(self):
self.text = self.commentFilter.sub('', self.text)
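# --- Added illustration (hedged; not part of the original tool) ---
# Given a header containing e.g.
#
#     void testConstruction(void);
#     void testAppend();
#     // void testDisabled();        <- stripped as a C++ comment
#
# FunctionScanner("foo_test.hpp").getFunctions() is expected to return
# ['testConstruction', 'testAppend'] according to functionFilter above.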
# -----------------------------------------------------------------------------
def generateClassName(s):
words = s.split('_')
name = []
for word in words:
index = 0
for c in word:
if c.isalpha():
break
index += 1
name.append(word[0:index])
name.append(word[index].upper())
name.append(word[index+1:])
return ''.join(name)
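# --- Added illustration (hedged; guarded so SCons never executes it when ---
# --- loading the tool) ---
if __name__ == "__main__":
    # "io_stream_test" becomes "IoStreamTest"; a leading digit inside a word
    # is kept in front of the first upper-cased letter.
    assert generateClassName("io_stream_test") == "IoStreamTest"
    assert generateClassName("can2_driver_test") == "Can2DriverTest"
    print("generateClassName() examples behave as expected")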
# -----------------------------------------------------------------------------
def unittest_action(target, source, env):
if not env.has_key('template'):
raise SCons.Errors.UserError, "Use 'UnittestRunner(..., template = ...)'"
template = env['template']
header = source
tests = {}
for file in header:
# io_stream_test.hpp -> io_stream_test
basename = os.path.splitext(file.name)[0]
# io_stream_test -> IoStreamTest
class_name = generateClassName(basename)
scanner = FunctionScanner(file.abspath)
tests[class_name] = {
'include_path': file.abspath,
'functions': scanner.getFunctions(),
'test_name': basename,
}
includes = []
name_strings = []
tests_cases = []
for class_name, attr in tests.iteritems():
includes.append('#include "%s"' % attr['include_path'])
instance_name = class_name[0].lower() + class_name[1:]
test_name_string = instance_name + 'Name'
name_strings.append('FLASH_STORAGE_STRING(%s) = "%s";' % (test_name_string, attr['test_name']))
str = """\
unittest::Controller::instance().nextTestSuite(xpcc::accessor::asFlash(%s));
{
%s %s;
""" % (test_name_string, class_name, instance_name)
for function_name in attr['functions']:
str += """
%(instance)s.setUp();
%(instance)s.%(function)s();
%(instance)s.tearDown();
""" % { 'instance': instance_name, 'function': function_name }
str += "}"
tests_cases.append(str)
substitutions = {
'includes': '\n'.join(includes),
'names': 'namespace\n{\n\t%s\n}' % '\n\t'.join(name_strings),
'tests': '\n'.join(tests_cases),
}
input = open(os.path.abspath(template), 'r').read()
output = string.Template(input).safe_substitute(substitutions)
open(target[0].abspath, 'w').write(output)
return 0
def unittest_emitter(target, source, env):
try:
Depends(target, SCons.Node.Python.Value(env['ARCHITECTURE']))
except KeyError:
pass
header = []
for file in source:
if file.name.endswith('_test.hpp'):
header.append(file)
return target, header
# -----------------------------------------------------------------------------
def generate(env, **kw):
env.Append(BUILDERS = {
'UnittestRunner': Builder(
action = SCons.Action.Action(unittest_action, "Generate runner file: $TARGET"),
suffix = '.cpp',
emitter = unittest_emitter,
target_factory = env.fs.File)
})
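# --- Added usage sketch (hedged; target and source names are examples) ---
# From an SConscript, with this file on the toolpath, the builder registered
# above would typically be used as:
#
#     env.Tool('unittest')
#     env.UnittestRunner('runner.cpp', Glob('*_test.hpp'),
#                        template='runner.cpp.in')
#
# The 'template' keyword is required; unittest_action() raises a UserError
# without it.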
def exists(env):
return True
|
{
"content_hash": "1f0bfc1e8916a835ce12a7f3d708bad9",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 97,
"avg_line_length": 26.08053691275168,
"alnum_prop": 0.5998455995882656,
"repo_name": "jrahlf/3D-Non-Contact-Laser-Profilometer",
"id": "3e6047a4561a5fdcc3600f90e21c4b3b3a683bcc",
"size": "5474",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "xpcc/scons/site_tools/unittest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "35578"
},
{
"name": "C",
"bytes": "10808820"
},
{
"name": "C++",
"bytes": "6982194"
},
{
"name": "CSS",
"bytes": "20227"
},
{
"name": "Gnuplot",
"bytes": "823"
},
{
"name": "Java",
"bytes": "89102"
},
{
"name": "Objective-C",
"bytes": "12577"
},
{
"name": "Objective-C++",
"bytes": "2376"
},
{
"name": "Python",
"bytes": "420663"
},
{
"name": "Shell",
"bytes": "639"
}
],
"symlink_target": ""
}
|
"""Accesses the google.logging.v2 LoggingServiceV2 API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from google.api import monitored_resource_pb2
from google.cloud.logging_v2.gapic import enums
from google.cloud.logging_v2.gapic import logging_service_v2_client_config
from google.cloud.logging_v2.gapic.transports import logging_service_v2_grpc_transport
from google.cloud.logging_v2.proto import log_entry_pb2
from google.cloud.logging_v2.proto import logging_pb2
from google.cloud.logging_v2.proto import logging_pb2_grpc
from google.protobuf import empty_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-logging").version
class LoggingServiceV2Client(object):
"""Service for ingesting and querying logs."""
SERVICE_ADDRESS = "logging.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.logging.v2.LoggingServiceV2"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
LoggingServiceV2Client: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def log_path(cls, project, log):
"""Return a fully-qualified log string."""
return google.api_core.path_template.expand(
"projects/{project}/logs/{log}", project=project, log=log
)
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
"projects/{project}", project=project
)
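# --- Added illustration (hedged; follows from the path templates above) ---
#
#     LoggingServiceV2Client.log_path("my-project", "syslog")
#         -> "projects/my-project/logs/syslog"
#     LoggingServiceV2Client.project_path("my-project")
#         -> "projects/my-project"
#
# The project id and log id are placeholder values.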
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
):
"""Constructor.
Args:
transport (Union[~.LoggingServiceV2GrpcTransport,
Callable[[~.Credentials, type], ~.LoggingServiceV2GrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = logging_service_v2_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=logging_service_v2_grpc_transport.LoggingServiceV2GrpcTransport,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = logging_service_v2_grpc_transport.LoggingServiceV2GrpcTransport(
address=self.SERVICE_ADDRESS, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def delete_log(
self,
log_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes all the log entries in a log.
The log reappears if it receives new entries.
Log entries written shortly before the delete operation might not be
deleted.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> log_name = client.log_path('[PROJECT]', '[LOG]')
>>>
>>> client.delete_log(log_name)
Args:
log_name (str): Required. The resource name of the log to delete:
::
"projects/[PROJECT_ID]/logs/[LOG_ID]"
"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
"folders/[FOLDER_ID]/logs/[LOG_ID]"
``[LOG_ID]`` must be URL-encoded. For example,
``"projects/my-project-id/logs/syslog"``,
``"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"``.
For more information about log names, see ``LogEntry``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_log" not in self._inner_api_calls:
self._inner_api_calls[
"delete_log"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_log,
default_retry=self._method_configs["DeleteLog"].retry,
default_timeout=self._method_configs["DeleteLog"].timeout,
client_info=self._client_info,
)
request = logging_pb2.DeleteLogRequest(log_name=log_name)
self._inner_api_calls["delete_log"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def write_log_entries(
self,
entries,
log_name=None,
resource=None,
labels=None,
partial_success=None,
dry_run=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Writes log entries to Logging. This API method is the
only way to send log entries to Logging. This method
is used, directly or indirectly, by the Logging agent
(fluentd) and all logging libraries configured to use Logging.
A single request may contain log entries for a maximum of 1000
different resources (projects, organizations, billing accounts or
folders).
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> # TODO: Initialize `entries`:
>>> entries = []
>>>
>>> response = client.write_log_entries(entries)
Args:
entries (list[Union[dict, ~google.cloud.logging_v2.types.LogEntry]]): Required. The log entries to send to Logging. The order of log entries
in this list does not matter. Values supplied in this method's
``log_name``, ``resource``, and ``labels`` fields are copied into those
log entries in this list that do not include values for their
corresponding fields. For more information, see the ``LogEntry`` type.
If the ``timestamp`` or ``insert_id`` fields are missing in log entries,
then this method supplies the current time or a unique identifier,
respectively. The supplied values are chosen so that, among the log
entries that did not supply their own values, the entries earlier in the
list will sort before the entries later in the list. See the
``entries.list`` method.
Log entries with timestamps that are more than the `logs retention
period <https://cloud.google.com/logging/quota-policy>`__ in the past or
more than 24 hours in the future will not be available when calling
``entries.list``. However, those log entries can still be exported with
`LogSinks <https://cloud.google.com/logging/docs/api/tasks/exporting-logs>`__.
To improve throughput and to avoid exceeding the `quota
limit <https://cloud.google.com/logging/quota-policy>`__ for calls to
``entries.write``, you should try to include several log entries in this
list, rather than calling this method for each individual log entry.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.logging_v2.types.LogEntry`
log_name (str): Optional. A default log resource name that is assigned to all log
entries in ``entries`` that do not specify a value for ``log_name``:
::
"projects/[PROJECT_ID]/logs/[LOG_ID]"
"organizations/[ORGANIZATION_ID]/logs/[LOG_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]"
"folders/[FOLDER_ID]/logs/[LOG_ID]"
``[LOG_ID]`` must be URL-encoded. For example:
::
"projects/my-project-id/logs/syslog"
"organizations/1234567890/logs/cloudresourcemanager.googleapis.com%2Factivity"
The permission logging.logEntries.create is needed on each project,
organization, billing account, or folder that is receiving new log
entries, whether the resource is specified in logName or in an
individual log entry.
resource (Union[dict, ~google.cloud.logging_v2.types.MonitoredResource]): Optional. A default monitored resource object that is assigned to all
log entries in ``entries`` that do not specify a value for ``resource``.
Example:
::
{ "type": "gce_instance",
"labels": {
"zone": "us-central1-a", "instance_id": "00000000000000000000" }}
See ``LogEntry``.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.logging_v2.types.MonitoredResource`
labels (dict[str -> str]): Optional. Default labels that are added to the ``labels`` field of all
log entries in ``entries``. If a log entry already has a label with the
same key as a label in this parameter, then the log entry's label is not
changed. See ``LogEntry``.
partial_success (bool): Optional. Whether valid entries should be written even if some other
entries fail due to INVALID\_ARGUMENT or PERMISSION\_DENIED errors. If
any entry is not written, then the response status is the error
associated with one of the failed entries and the response includes
error details keyed by the entries' zero-based index in the
``entries.write`` method.
dry_run (bool): Optional. If true, the request should expect normal response, but the
entries won't be persisted nor exported. Useful for checking whether the
logging API endpoints are working properly before sending valuable data.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.logging_v2.types.WriteLogEntriesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "write_log_entries" not in self._inner_api_calls:
self._inner_api_calls[
"write_log_entries"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.write_log_entries,
default_retry=self._method_configs["WriteLogEntries"].retry,
default_timeout=self._method_configs["WriteLogEntries"].timeout,
client_info=self._client_info,
)
request = logging_pb2.WriteLogEntriesRequest(
entries=entries,
log_name=log_name,
resource=resource,
labels=labels,
partial_success=partial_success,
dry_run=dry_run,
)
return self._inner_api_calls["write_log_entries"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def list_log_entries(
self,
resource_names,
project_ids=None,
filter_=None,
order_by=None,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists log entries. Use this method to retrieve log entries from Logging.
For ways to export log entries, see `Exporting
Logs <https://cloud.google.com/logging/docs/export>`__.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> # TODO: Initialize `resource_names`:
>>> resource_names = []
>>>
>>> # Iterate over all results
>>> for element in client.list_log_entries(resource_names):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_log_entries(resource_names).pages:
... for element in page:
... # process element
... pass
Args:
resource_names (list[str]): Required. Names of one or more parent resources from which to retrieve
log entries:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
Projects listed in the ``project_ids`` field are added to this list.
project_ids (list[str]): Deprecated. Use ``resource_names`` instead. One or more project
identifiers or project numbers from which to retrieve log entries.
Example: ``"my-project-1A"``. If present, these project identifiers are
converted to resource name format and added to the list of resources in
``resource_names``.
filter_ (str): Optional. A filter that chooses which log entries to return. See
`Advanced Logs
Filters <https://cloud.google.com/logging/docs/view/advanced_filters>`__.
Only log entries that match the filter are returned. An empty filter
matches all log entries in the resources listed in ``resource_names``.
Referencing a parent resource that is not listed in ``resource_names``
will cause the filter to return no results. The maximum length of the
filter is 20000 characters.
order_by (str): Optional. How the results should be sorted. Presently, the only
permitted values are ``"timestamp asc"`` (default) and
``"timestamp desc"``. The first option returns entries in order of
increasing values of ``LogEntry.timestamp`` (oldest first), and the
second option returns entries in order of decreasing timestamps (newest
first). Entries with equal timestamps are returned in order of their
``insert_id`` values.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.logging_v2.types.LogEntry` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_log_entries" not in self._inner_api_calls:
self._inner_api_calls[
"list_log_entries"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_log_entries,
default_retry=self._method_configs["ListLogEntries"].retry,
default_timeout=self._method_configs["ListLogEntries"].timeout,
client_info=self._client_info,
)
request = logging_pb2.ListLogEntriesRequest(
resource_names=resource_names,
project_ids=project_ids,
filter=filter_,
order_by=order_by,
page_size=page_size,
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_log_entries"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="entries",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def list_monitored_resource_descriptors(
self,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists the descriptors for monitored resource types used by Logging.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> # Iterate over all results
>>> for element in client.list_monitored_resource_descriptors():
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_monitored_resource_descriptors().pages:
... for element in page:
... # process element
... pass
Args:
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.logging_v2.types.MonitoredResourceDescriptor` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_monitored_resource_descriptors" not in self._inner_api_calls:
self._inner_api_calls[
"list_monitored_resource_descriptors"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_monitored_resource_descriptors,
default_retry=self._method_configs[
"ListMonitoredResourceDescriptors"
].retry,
default_timeout=self._method_configs[
"ListMonitoredResourceDescriptors"
].timeout,
client_info=self._client_info,
)
request = logging_pb2.ListMonitoredResourceDescriptorsRequest(
page_size=page_size
)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_monitored_resource_descriptors"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="resource_descriptors",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def list_logs(
self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Lists the logs in projects, organizations, folders, or billing accounts.
Only logs that have entries are listed.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.LoggingServiceV2Client()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_logs(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_logs(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The resource name that owns the logs:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`str` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_logs" not in self._inner_api_calls:
self._inner_api_calls[
"list_logs"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_logs,
default_retry=self._method_configs["ListLogs"].retry,
default_timeout=self._method_configs["ListLogs"].timeout,
client_info=self._client_info,
)
request = logging_pb2.ListLogsRequest(parent=parent, page_size=page_size)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_logs"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="log_names",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
|
{
"content_hash": "50b3e031e3ca739d29db1e0d4baceaff",
"timestamp": "",
"source": "github",
"line_count": 701,
"max_line_length": 155,
"avg_line_length": 45.05135520684736,
"alnum_prop": 0.5855419397739147,
"repo_name": "dhermes/gcloud-python",
"id": "692f01f290a62e1165297583a652b297b75f371b",
"size": "32182",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "logging/google/cloud/logging_v2/gapic/logging_service_v2_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "95635"
},
{
"name": "Python",
"bytes": "2871895"
},
{
"name": "Shell",
"bytes": "4683"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from envisage.ui.single_project.project_factory import *
|
{
"content_hash": "507463a7d35edb222b1507441be45eda",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 56,
"avg_line_length": 48,
"alnum_prop": 0.8020833333333334,
"repo_name": "enthought/etsproxy",
"id": "d99764ba434b8d3afa5ef10ef7cb12849e1836af",
"size": "111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/envisage/ui/single_project/project_factory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
}
|
import argparse
# Parameters
parser = argparse.ArgumentParser(description='Catcher')
parser.add_argument('--grid', dest='grid', type=int, default=11, help='Game grid size.')
parser.add_argument('--memory', dest='memory', type=int, default=500, help='Experience replay memory length')
parser.add_argument('--epoch', dest='epoch', type=int, default=1000, help='Number of epochs')
parser.add_argument('--batch', dest='batch', type=int, default=50, help='Batch size retrieved from experience replay memory')
parser.add_argument('--epsilon', dest='epsilon', type=float, default=0.1, help='Exploration rate epsilon')
parser.add_argument('--gamma', dest='gamma', type=float, default=0.9, help='Discount rate gamma')
parser.add_argument('--save', dest='save', type=str, default='catcher_agent.h5', help='Path to save the trained agent.')
parser.add_argument('--output', dest='output', type=str, default='catcher_output.gif', help='Path to save output animation.')
args = parser.parse_args()
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.core import Flatten
from keras.optimizers import SGD
from x.environment import Catcher
from x.models import KerasModel
from x.memory import ExperienceReplay
from x.agent import DiscreteAgent
num_actions = 3
nb_filters, nb_rows, nb_cols = 32, 3, 3
# keras model
keras_model = Sequential()
keras_model.add(Convolution2D(nb_filters, nb_rows, nb_cols, input_shape=(1, args.grid, args.grid), activation='relu', subsample=(2, 2)))
keras_model.add(Convolution2D(nb_filters, nb_rows, nb_cols, activation='relu'))
keras_model.add(Convolution2D(num_actions, nb_rows, nb_cols))
keras_model.add(MaxPooling2D(keras_model.output_shape[-2:]))
keras_model.add(Flatten())
# X wrapper for Keras
model = KerasModel(keras_model)
# Memory
M = ExperienceReplay(memory_length=args.memory)
# Agent
A = DiscreteAgent(model, M)
# SGD optimizer + MSE cost + MAX policy = Q-learning as we know it
A.compile(optimizer=SGD(lr=0.2), loss="mse", policy_rule="max")
# To run an experiment, the Agent needs an Environment to interact with
catcher = Catcher(grid_size=args.grid, output_shape=(1, args.grid, args.grid))
A.learn(catcher, epoch=args.epoch, batch_size=args.batch)
# Test the agent following the learned policy
A.play(catcher, epoch=100, visualize={'filepath': args.output, 'n_frames': 270, 'gray': True})
|
{
"content_hash": "5ec0291bc31b93e17c1b16484933cecc",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 136,
"avg_line_length": 45.86538461538461,
"alnum_prop": 0.7488469601677149,
"repo_name": "EderSantana/X",
"id": "3c3b12b4d54233a6ae3d7dee8a275614c90ac26e",
"size": "2385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/catcher_cnn.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2054"
},
{
"name": "Python",
"bytes": "34968"
}
],
"symlink_target": ""
}
|
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
import requests, json
Base = automap_base()
# engine, suppose it has two tables 'user' and 'address' set up
#engine = create_engine('mssql+pyodbc://devel:n***********d@NEIL-DESKTOP/FRESH?driver=SQL Server; Trusted_Connection=Yes', echo=True)
# reflect the tables
#Base.prepare(engine, reflect=True)
# mapped classes are now created with names by default
# matching that of the table name.
#session = Session(engine)
github_url = "https://nkellis.herokuapp.com/api/v1.0/posts/"
headers = {'content-type': 'application/json'}
#Item = Base.classes.Item
#for instance in session.query(Item).order_by(Item.ID):
data = json.dumps({'productName': "text",'author_id':1})
#data = {"productName": "this is Neils shiny new matrix"}
print data
r = requests.post(github_url, data, headers=headers, auth=('neilkenealy@gmail.com', 'n****************d'))
print r.json
|
{
"content_hash": "c7e4eba0bfd352440149bcca85462066",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 133,
"avg_line_length": 32.70967741935484,
"alnum_prop": 0.7001972386587771,
"repo_name": "nkenealy/shopify-product-load",
"id": "a55c61c2e674873c50f8d7daed156a29990d0ff1",
"size": "1014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sql_server_upload/ellis_pamreq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20212"
},
{
"name": "JavaScript",
"bytes": "720710"
},
{
"name": "Python",
"bytes": "123638"
}
],
"symlink_target": ""
}
|
import unittest
from biicode.common.utils.serializer import Serializer, DictDeserializer
from biicode.common.model.brl.block_cell_name import BlockCellName
class DummyA(object):
a = None
b = None
def __init__(self, *args, **kwargs):
self.a = 1
self.b = 2
def serialize(self):
return Serializer().build(("a", self.a), ("b", self.b))
class DummyB(DummyA):
def __init__(self, *args, **kwargs):
self.a = DummyA()
self.b = 2
def serialize(self):
return Serializer().build(("a", self.a), ("b", self.b))
class DummyC(DummyB):
def serialize(self):
return Serializer().build(("a", self.a), ("b", self.b))
class DummyD(object):
def serialize(self):
return Serializer().build(("list", ("uno", "dos")))
class DummyE(DummyB):
def serialize(self):
return Serializer().build(("list", (self.a, "dos")))
def __init__(self, *args, **kwargs):
self.a = DummyA()
self.b = 2
class SerializerTest(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.__a = DummyA()
self.__b = DummyB()
self.__c = DummyC()
self.__d = DummyD()
self.__e = DummyE()
def testKeyRepeated(self):
''' should raise an exception for a repeated key '''
self.assertRaises(ValueError,
Serializer().build,
("k2", "value2"), ("k", "value3"), ("k2", "value4")
)
def testRecursiveSerialization(self):
''' should call to_dict from DummyA '''
s = Serializer().build(
("a", self.__a),
("b", 122)
)
self.assertEqual(s, {"a": {"a": 1, "b": 2}, "b": 122})
def testRecursiveSerialization2(self):
''' should call to_dict from DummyA and DummyB '''
s = Serializer().build(
("a", self.__a),
("b", self.__b)
)
self.assertEqual(s, {"a": {"a": 1, "b": 2}, "b": {"a": {"a": 1, "b": 2}, "b": 2}})
def testKeyRepeatedEmbedding(self):
self.assertRaises(ValueError,
Serializer().build,
(None, self.__a), # Embed DummyA here, "b" key repeated
("b", self.__b)
)
def testEmbed(self):
s = Serializer().build(
(None, self.__a),
("c", self.__c)
)
self.assertEqual(s, {'a': 1, 'b': 2, 'c': {'a': {'a': 1, 'b': 2}, 'b': 2}})
def testObjectWithList(self):
s = Serializer().build(
(None, self.__a),
("d", self.__d)
)
self.assertEqual(s, {'a': 1, 'b': 2, 'd': {'list': ["uno", "dos"]}})
def testObjectWithComplexList(self):
s = Serializer().build(
(None, self.__a),
("e", self.__e)
)
self.assertEqual(s, {'a': 1, 'b': 2, 'e': {'list': [{'a': 1, 'b': 2}, "dos"]}})
class TestDictDeserializer(unittest.TestCase):
def test_deserialize_none(self):
dd = DictDeserializer(str, str)
self.assertIsNone(dd.deserialize(None))
def testDeserializeDict(self):
brl = BlockCellName("user/block/path/file.h")
brl2 = BlockCellName("user/block/path/file2.h")
h = {brl.serialize(): "asasdasdasd",
brl2.serialize(): "1123"
}
ret = DictDeserializer(BlockCellName, str).deserialize(h)
self.assertEqual(ret, {'user/block/path/file.h': 'asasdasdasd',
'user/block/path/file2.h': '1123'})
h = {brl.serialize(): brl.serialize(),
brl2.serialize(): brl.serialize()
}
ret = DictDeserializer(BlockCellName, BlockCellName).deserialize(h)
self.assertEqual(ret,
{'user/block/path/file.h': 'user/block/path/file.h',
'user/block/path/file2.h': 'user/block/path/file.h'}
)
|
{
"content_hash": "f8e6d9865ebf89fcfb22b91586a26050",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 90,
"avg_line_length": 30.97826086956522,
"alnum_prop": 0.47134502923976607,
"repo_name": "biicode/common",
"id": "f8850b16cebf4bf8781bc346ccf9f1199aaef65e",
"size": "4275",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "test/utils/serializer_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3157300"
},
{
"name": "C++",
"bytes": "4667113"
},
{
"name": "CMake",
"bytes": "25379"
},
{
"name": "FORTRAN",
"bytes": "3691"
},
{
"name": "Java",
"bytes": "4201"
},
{
"name": "JavaScript",
"bytes": "172849"
},
{
"name": "Makefile",
"bytes": "6333"
},
{
"name": "Objective-C",
"bytes": "826"
},
{
"name": "Python",
"bytes": "702276"
},
{
"name": "Shell",
"bytes": "645"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
# Register your models here.
from pedidovenda.models import Categoria
from pedidovenda.models import Produto
from pedidovenda.models import Pedido
from pedidovenda.models import ItemPedido
from pedidovenda.models import Mesa
from pedidovenda.models import Usuario
class CategoriaAdmin(admin.ModelAdmin):
list_display = ('id', 'descricao')
class ProdutoAdmin(admin.ModelAdmin):
list_display = ('id', 'descricao', 'valor', 'categoria')
def get_name(self, obj):
return obj.categoria.descricao
get_name.admin_order_field = 'categoria' #Allows column order sorting
get_name.short_description = 'Categoria' #Renames column head
class MesaAdmin(admin.ModelAdmin):
list_display = ('id', 'descricao', 'status', 'tipo')
class PedidoAdmin(admin.ModelAdmin):
list_display = ('id', 'valorTotal', 'codigoAtendente', 'status')
class ItemPedidoAdmin(admin.ModelAdmin):
list_display = ('id', 'quantidade', 'observacao', 'status', 'valorUnit', 'valorTotalItem')
class UsuarioAdmin(admin.ModelAdmin):
list_display = ('id', 'nome', 'email', 'login', 'senha')
admin.site.register(Produto,ProdutoAdmin)
admin.site.register(Pedido,PedidoAdmin)
admin.site.register(ItemPedido,ItemPedidoAdmin)
admin.site.register(Categoria, CategoriaAdmin)
admin.site.register(Mesa, MesaAdmin)
admin.site.register(Usuario, UsuarioAdmin)
|
{
"content_hash": "4f7072f01c967fd7d2bdbe6ba8727fd6",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 91,
"avg_line_length": 29.0625,
"alnum_prop": 0.7670250896057348,
"repo_name": "thaleslima/WS-pedido-venda",
"id": "513d6ba52a61d4329298e08ec3d0c858eff1a693",
"size": "1395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pedidovenda/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "19858"
},
{
"name": "JavaScript",
"bytes": "15276"
},
{
"name": "Python",
"bytes": "13157"
}
],
"symlink_target": ""
}
|
import logging
def make_logger(configuration):
pass
|
{
"content_hash": "4adcd9ca856ed3707ea3839fe37c060a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 31,
"avg_line_length": 11.4,
"alnum_prop": 0.7543859649122807,
"repo_name": "tokyo-jesus/aniracetam",
"id": "304c02c84ec61c8016f760f25f161af168f7bee0",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aniracetam/_logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23876"
},
{
"name": "Shell",
"bytes": "136"
}
],
"symlink_target": ""
}
|
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
subscription_id: str,
resource_group: str,
fluid_relay_server_name: str,
fluid_relay_container_name: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-06-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{fluidRelayServerName}/fluidRelayContainers/{fluidRelayContainerName}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroup": _SERIALIZER.url("resource_group", resource_group, 'str'),
"fluidRelayServerName": _SERIALIZER.url("fluid_relay_server_name", fluid_relay_server_name, 'str'),
"fluidRelayContainerName": _SERIALIZER.url("fluid_relay_container_name", fluid_relay_container_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_delete_request(
subscription_id: str,
resource_group: str,
fluid_relay_server_name: str,
fluid_relay_container_name: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-06-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{fluidRelayServerName}/fluidRelayContainers/{fluidRelayContainerName}") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroup": _SERIALIZER.url("resource_group", resource_group, 'str'),
"fluidRelayServerName": _SERIALIZER.url("fluid_relay_server_name", fluid_relay_server_name, 'str'),
"fluidRelayContainerName": _SERIALIZER.url("fluid_relay_container_name", fluid_relay_container_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
def build_list_by_fluid_relay_servers_request(
subscription_id: str,
resource_group: str,
fluid_relay_server_name: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-06-01")) # type: str
accept = _headers.pop('Accept', "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{fluidRelayServerName}/fluidRelayContainers") # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroup": _SERIALIZER.url("resource_group", resource_group, 'str'),
"fluidRelayServerName": _SERIALIZER.url("fluid_relay_server_name", fluid_relay_server_name, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_params,
headers=_headers,
**kwargs
)
class FluidRelayContainersOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.fluidrelay.FluidRelayManagementClient`'s
:attr:`fluid_relay_containers` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def get(
self,
resource_group: str,
fluid_relay_server_name: str,
fluid_relay_container_name: str,
**kwargs: Any
) -> _models.FluidRelayContainer:
"""Get a Fluid Relay container.
Get a Fluid Relay container.
:param resource_group: The resource group containing the resource.
:type resource_group: str
:param fluid_relay_server_name: The Fluid Relay server resource name.
:type fluid_relay_server_name: str
:param fluid_relay_container_name: The Fluid Relay container resource name.
:type fluid_relay_container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FluidRelayContainer, or the result of cls(response)
:rtype: ~azure.mgmt.fluidrelay.models.FluidRelayContainer
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-06-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.FluidRelayContainer]
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group=resource_group,
fluid_relay_server_name=fluid_relay_server_name,
fluid_relay_container_name=fluid_relay_container_name,
api_version=api_version,
template_url=self.get.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('FluidRelayContainer', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{fluidRelayServerName}/fluidRelayContainers/{fluidRelayContainerName}"} # type: ignore
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group: str,
fluid_relay_server_name: str,
fluid_relay_container_name: str,
**kwargs: Any
) -> None:
"""Delete a Fluid Relay container.
Delete a Fluid Relay container.
:param resource_group: The resource group containing the resource.
:type resource_group: str
:param fluid_relay_server_name: The Fluid Relay server resource name.
:type fluid_relay_server_name: str
:param fluid_relay_container_name: The Fluid Relay container resource name.
:type fluid_relay_container_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-06-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group=resource_group,
fluid_relay_server_name=fluid_relay_server_name,
fluid_relay_container_name=fluid_relay_container_name,
api_version=api_version,
template_url=self.delete.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{fluidRelayServerName}/fluidRelayContainers/{fluidRelayContainerName}"} # type: ignore
@distributed_trace
def list_by_fluid_relay_servers(
self,
resource_group: str,
fluid_relay_server_name: str,
**kwargs: Any
) -> Iterable[_models.FluidRelayContainerList]:
"""List all Fluid Relay containers which are children of a given Fluid Relay server.
List all Fluid Relay containers which are children of a given Fluid Relay server.
:param resource_group: The resource group containing the resource.
:type resource_group: str
:param fluid_relay_server_name: The Fluid Relay server resource name.
:type fluid_relay_server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FluidRelayContainerList or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.fluidrelay.models.FluidRelayContainerList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-06-01")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.FluidRelayContainerList]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_fluid_relay_servers_request(
subscription_id=self._config.subscription_id,
resource_group=resource_group,
fluid_relay_server_name=fluid_relay_server_name,
api_version=api_version,
template_url=self.list_by_fluid_relay_servers.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
request = build_list_by_fluid_relay_servers_request(
subscription_id=self._config.subscription_id,
resource_group=resource_group,
fluid_relay_server_name=fluid_relay_server_name,
api_version=api_version,
template_url=next_link,
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("FluidRelayContainerList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_fluid_relay_servers.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.FluidRelay/fluidRelayServers/{fluidRelayServerName}/fluidRelayContainers"} # type: ignore
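# --- Editorial usage sketch (not part of the generated client) ---
# The class docstring above says these operations should be reached through
# FluidRelayManagementClient's ``fluid_relay_containers`` attribute; the
# commented sketch below illustrates that access pattern. It assumes
# ``azure-identity`` is available and that the placeholder subscription,
# resource group, server and container names are replaced with real values.
#
# from azure.identity import DefaultAzureCredential
# from azure.mgmt.fluidrelay import FluidRelayManagementClient
#
# client = FluidRelayManagementClient(
#     credential=DefaultAzureCredential(),
#     subscription_id="<subscription-id>",
# )
# container = client.fluid_relay_containers.get(
#     resource_group="<resource-group>",
#     fluid_relay_server_name="<server-name>",
#     fluid_relay_container_name="<container-name>",
# )
# for item in client.fluid_relay_containers.list_by_fluid_relay_servers(
#     resource_group="<resource-group>",
#     fluid_relay_server_name="<server-name>",
# ):
#     print(item.name)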
|
{
"content_hash": "799d7fadbb064e57d5d88d7ff52cbe47",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 255,
"avg_line_length": 42.78010471204188,
"alnum_prop": 0.6464325052013218,
"repo_name": "Azure/azure-sdk-for-python",
"id": "539ab71c846e8a3294b1bc5271e7bd7637e40057",
"size": "16842",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/fluidrelay/azure-mgmt-fluidrelay/azure/mgmt/fluidrelay/operations/_fluid_relay_containers_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
VERSION='0.1' # very early develop version
|
{
"content_hash": "2855de095a68db9d8ca1be8f37a407bc",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 42,
"avg_line_length": 43,
"alnum_prop": 0.7441860465116279,
"repo_name": "pyKun/nova-thousands",
"id": "2a3c4ab61958ec5b99e51432a13b069d9b1266db",
"size": "135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "novathousands/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4356"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
from Cython.Build import cythonize
setup(
ext_modules = cythonize("gen_walk.pyx", language="c++")
)
|
{
"content_hash": "6e7d4c2dfbccc127cc69f22ceeb949e1",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 63,
"avg_line_length": 24.333333333333332,
"alnum_prop": 0.6917808219178082,
"repo_name": "gear/motifwalk",
"id": "fc75f2d11a5d4a50494a5e146601b00e24c8a8cd",
"size": "146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/src/mane/genwalk/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "19444158"
},
{
"name": "PostScript",
"bytes": "3502"
},
{
"name": "Python",
"bytes": "306067"
},
{
"name": "Roff",
"bytes": "13529332"
},
{
"name": "Shell",
"bytes": "4391"
},
{
"name": "TeX",
"bytes": "314493"
}
],
"symlink_target": ""
}
|
from unittest import mock
from heat.engine.clients.os import zaqar
from heat.tests import common
from heat.tests import utils
class ZaqarClientPluginTest(common.HeatTestCase):
def test_create(self):
context = utils.dummy_context()
plugin = context.clients.client_plugin('zaqar')
client = plugin.client()
self.assertIsNotNone(client.queue)
def test_create_for_tenant(self):
context = utils.dummy_context()
plugin = context.clients.client_plugin('zaqar')
client = plugin.create_for_tenant('other_tenant', 'token')
self.assertEqual('other_tenant',
client.conf['auth_opts']['options']['os_project_id'])
self.assertEqual('token',
client.conf['auth_opts']['options']['os_auth_token'])
def test_event_sink(self):
context = utils.dummy_context()
client = context.clients.client('zaqar')
fake_queue = mock.MagicMock()
client.queue = lambda x, auto_create: fake_queue
sink = zaqar.ZaqarEventSink('myqueue')
sink.consume(context, {'hello': 'world'})
fake_queue.post.assert_called_once_with(
{'body': {'hello': 'world'}, 'ttl': 3600})
|
{
"content_hash": "5f5c6731d870101e4c37a59e57fd0716",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 37.303030303030305,
"alnum_prop": 0.6246953696181966,
"repo_name": "openstack/heat",
"id": "bd588c7c913f85ce4cb7221f9c09de9dba2b8f5f",
"size": "1806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/clients/test_zaqar_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9145593"
},
{
"name": "Shell",
"bytes": "65832"
}
],
"symlink_target": ""
}
|
"""
====================================================================
Decoding in sensor space data using the Common Spatial Pattern (CSP)
====================================================================
Decoding applied to MEG data in sensor space decomposed using CSP.
Here the classifier is applied to features extracted on CSP filtered signals.
See http://en.wikipedia.org/wiki/Common_spatial_pattern and [1]
[1] Zoltan J. Koles. The quantitative extraction and topographic mapping
of the abnormal components in the clinical EEG. Electroencephalography
and Clinical Neurophysiology, 79(6):440--447, December 1991.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Romain Trachel <romain.trachel@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters and read data
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
raw.info['bads'] = ['MEG 2443'] # set bad channels
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=False, eog=False,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=None, preload=True)
labels = epochs.events[:, -1]
evoked = epochs.average()
###############################################################################
# Decoding in sensor space using a linear SVM
from sklearn.svm import SVC # noqa
from sklearn.cross_validation import ShuffleSplit # noqa
from mne.decoding import CSP # noqa
n_components = 3 # pick some components
svc = SVC(C=1, kernel='linear')
csp = CSP(n_components=n_components)
# Define a monte-carlo cross-validation generator (reduce variance):
cv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)
scores = []
epochs_data = epochs.get_data()
for train_idx, test_idx in cv:
y_train, y_test = labels[train_idx], labels[test_idx]
X_train = csp.fit_transform(epochs_data[train_idx], y_train)
X_test = csp.transform(epochs_data[test_idx])
# fit classifier
svc.fit(X_train, y_train)
scores.append(svc.score(X_test, y_test))
# Printing the results
class_balance = np.mean(labels == labels[0])
class_balance = max(class_balance, 1. - class_balance)
print("Classification accuracy: %f / Chance level: %f" % (np.mean(scores),
class_balance))
# Or use much more convenient scikit-learn cross_val_score function using
# a Pipeline
from sklearn.pipeline import Pipeline # noqa
from sklearn.cross_validation import cross_val_score # noqa
cv = ShuffleSplit(len(labels), 10, test_size=0.2, random_state=42)
clf = Pipeline([('CSP', csp), ('SVC', svc)])
scores = cross_val_score(clf, epochs_data, labels, cv=cv, n_jobs=1)
print(scores.mean()) # should match results above
# And using regularized CSP with Ledoit-Wolf estimator
csp = CSP(n_components=n_components, reg='ledoit_wolf')
clf = Pipeline([('CSP', csp), ('SVC', svc)])
scores = cross_val_score(clf, epochs_data, labels, cv=cv, n_jobs=1)
print(scores.mean()) # should get better results than above
# plot CSP patterns estimated on full data for visualization
csp.fit_transform(epochs_data, labels)
evoked.data = csp.patterns_.T
evoked.times = np.arange(evoked.data.shape[0])
evoked.plot_topomap(times=[0, 1, 2, 3], ch_type='grad',
colorbar=False, size=1.5)
|
{
"content_hash": "8283db475bd11270a575d4d381aabf42",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 79,
"avg_line_length": 36.55140186915888,
"alnum_prop": 0.646637688570698,
"repo_name": "mne-tools/mne-tools.github.io",
"id": "da1ad28ddcebe89dd7920e4a0d5c129fbfd100c1",
"size": "3911",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "0.11/_downloads/plot_decoding_csp_space.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "708696"
},
{
"name": "Dockerfile",
"bytes": "1820"
},
{
"name": "HTML",
"bytes": "1526247783"
},
{
"name": "JavaScript",
"bytes": "1323087"
},
{
"name": "Jupyter Notebook",
"bytes": "24820047"
},
{
"name": "Python",
"bytes": "18575494"
}
],
"symlink_target": ""
}
|