text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""Controllers for the cron jobs."""
pass
|
{
"content_hash": "5df627f98e76912cfbe304ecdcc0b620",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 36,
"avg_line_length": 14.333333333333334,
"alnum_prop": 0.6744186046511628,
"repo_name": "miyucy/oppia",
"id": "63ee7650ec9296c591b268c27c257101720e0ef4",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/controllers/cron.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "441"
},
{
"name": "CSS",
"bytes": "23262"
},
{
"name": "JavaScript",
"bytes": "793925"
},
{
"name": "Python",
"bytes": "1049129"
},
{
"name": "Shell",
"bytes": "21516"
}
],
"symlink_target": ""
}
|
import os
import re
import sys
from setuptools import find_packages, setup
def get_version(*file_paths):
    """Retrieve the version string from magic_cards/__init__.py.

    Args:
        *file_paths: path components, joined onto this file's directory,
            locating the module that assigns ``__version__``.

    Returns:
        The version string captured from the first
        ``__version__ = '...'`` assignment.

    Raises:
        RuntimeError: if no ``__version__`` assignment is found.
    """
    filename = os.path.join(os.path.dirname(__file__), *file_paths)
    # Use a context manager so the file handle is closed promptly; the
    # original left the handle to the garbage collector.
    with open(filename) as version_file:
        contents = version_file.read()
    version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                              contents, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError('Unable to find version string.')
version = get_version("src", "magic_cards", "__init__.py")

# 'publish': build and upload a release, after checking that the packaging
# tools are importable.
if sys.argv[-1] == 'publish':
    try:
        import wheel  # noqa: F401 -- presence check only
    except ImportError:
        print('Wheel library missing. Please run "pip install wheel"')
        sys.exit()
    try:
        import twine  # noqa: F401 -- presence check only
    except ImportError:
        print('Twine library missing. Please run "pip install twine"')
        sys.exit()
    os.system('python setup.py sdist bdist_wheel --universal')
    os.system('twine upload dist/*')
    sys.exit()

# 'tag': create and push a git tag named after the detected version.
if sys.argv[-1] == 'tag':
    print("Tagging the version on git:")
    os.system("git tag -a %s -m 'version %s'" % (version, version))
    os.system("git push --tags")
    sys.exit()

# Assemble the long description, closing the files promptly (the original
# leaked both handles).
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('HISTORY.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')

setup(
    name='django-magic-cards',
    version=version,
    description="""A Django app for the Oracle text of all Magic: the Gathering cards.""",
    long_description=readme + '\n\n' + history,
    author='Paul Baranay',
    author_email='pbaranay@gmail.com',
    url='https://github.com/pbaranay/django-magic-cards',
    packages=find_packages(where="src"),
    package_dir={"": "src"},
    include_package_data=True,
    install_requires=["django-light-enums>=0.1.6", "inflect>=0.2.5", "requests>=2.18.2"],
    license="MIT",
    zip_safe=False,
    keywords='django-magic-cards',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Framework :: Django',
        'Framework :: Django :: 1.8',
        'Framework :: Django :: 1.9',
        'Framework :: Django :: 1.10',
        'Framework :: Django :: 1.11',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Games/Entertainment',
    ],
)
|
{
"content_hash": "64c2378803c20df3ae357cb480012e01",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 90,
"avg_line_length": 32.85897435897436,
"alnum_prop": 0.5946156847444402,
"repo_name": "pbaranay/django-magic-cards",
"id": "0f16e2fad447c855fe9d1b4580301ff7310997c5",
"size": "2609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2864"
},
{
"name": "Makefile",
"bytes": "1529"
},
{
"name": "Python",
"bytes": "37871"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import unicode_literals
from acos_client import errors as acos_errors
from acos_client.v21 import base
class HealthMonitor(base.BaseV21):
    """AxAPI 2.1 slb health-monitor operations (search/create/update/delete)."""

    # Monitor type identifiers accepted by the AxAPI.
    ICMP = 0
    TCP = 1
    HTTP = 3
    HTTPS = 4

    def get(self, name, **kwargs):
        """Look up a health monitor by name."""
        return self._post("slb.hm.search", {"name": name}, **kwargs)

    def _set(self, action, name, mon_type, interval, timeout, max_retries,
             method=None, url=None, expect_code=None, port=None, **kwargs):
        """Issue a create/update request for a health monitor.

        Builds the common parameter set, attaches per-protocol options for
        the known monitor types, and retries once with 'passive' set when
        the device reports it as missing.
        """
        # (protocol key, default port) per known monitor type; ICMP has no
        # port concept, hence None.
        type_table = {
            self.ICMP: ('icmp', None),
            self.TCP: ('tcp', 80),
            self.HTTP: ('http', 80),
            self.HTTPS: ('https', 443),
        }
        params = {
            'retry': max_retries,
            'name': name,
            'consec_pass_reqd': max_retries,
            'interval': interval,
            'timeout': timeout,
            'disable_after_down': 0,
            'type': mon_type,
        }
        if mon_type in type_table:
            protocol, default_port = type_table[mon_type]
            proto_options = {
                'url': "%s %s" % (method, url),
                'expect_code': expect_code,
            }
            chosen_port = port or default_port
            if chosen_port:
                proto_options['port'] = chosen_port
            params[protocol] = proto_options
        try:
            self._post(action, params, **kwargs)
        except acos_errors.HMMissingHttpPassive:
            # Some version of AxAPI 2.1 require this arg
            params[type_table[mon_type][0]]['passive'] = 0
            self._post(action, params, **kwargs)

    def create(self, name, mon_type, interval, timeout, max_retries,
               method=None, url=None, expect_code=None, port=None, **kwargs):
        """Create a new health monitor."""
        self._set("slb.hm.create", name, mon_type, interval, timeout,
                  max_retries, method, url, expect_code, port, **kwargs)

    def update(self, name, mon_type, interval, timeout, max_retries,
               method=None, url=None, expect_code=None, port=None, **kwargs):
        """Modify an existing health monitor."""
        self._set("slb.hm.update", name, mon_type, interval, timeout,
                  max_retries, method, url, expect_code, port, **kwargs)

    def delete(self, name, **kwargs):
        """Remove a health monitor by name."""
        self._post("slb.hm.delete", {"name": name}, **kwargs)
|
{
"content_hash": "5c45235b8ae4b74e8838d0da31daccfb",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 77,
"avg_line_length": 34.013698630136986,
"alnum_prop": 0.5054369714055578,
"repo_name": "mdurrant-b3/acos-client",
"id": "46532d770479a6076444042ee92747516f1c2e86",
"size": "3105",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "acos_client/v21/slb/hm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "477240"
}
],
"symlink_target": ""
}
|
import unittest
from ...compatibility import StringIO
from ...drawing import Drawing
class TestWriteXdrcolOff(unittest.TestCase):
    """Unit test for the Drawing _write_col_off() method."""

    def setUp(self):
        # Route the drawing's XML output into an in-memory buffer so the
        # test can inspect exactly what was written.
        stream = StringIO()
        drawing = Drawing()
        drawing._set_filehandle(stream)
        self.fh = stream
        self.drawing = drawing

    def test_write_col_off(self):
        """_write_col_off() should emit a single <xdr:colOff> element."""
        self.drawing._write_col_off(457200)
        expected = """<xdr:colOff>457200</xdr:colOff>"""
        self.assertEqual(self.fh.getvalue(), expected)
|
{
"content_hash": "41ebaee6ae09c269226e2bfd877a9a58",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 51,
"avg_line_length": 22.96,
"alnum_prop": 0.6132404181184669,
"repo_name": "applicationdevm/XlsxWriter",
"id": "82f66c379bb550b2e8cf7153d36f5bc47621092a",
"size": "747",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "xlsxwriter/test/drawing/test_write_col_off.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7365"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2512197"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
}
|
import sys
import tensorflow as tf

# ModuleNotFoundError is new in 3.6; older versions will throw SystemError
# for a failed relative import, so alias it to keep the except clause below
# portable.
if sys.version_info < (3, 6):
    ModuleNotFoundError = SystemError

try:
    from . import rnn_inference
    from . import sampler_inputs
    from .transformer import INT_DTYPE, FLOAT_DTYPE
    from . import transformer_inference
except (ImportError, ModuleNotFoundError):
    # Fall back to top-level imports when this file is run as a script
    # rather than imported as part of the package. Catch only import
    # failures -- the previous bare 'except:' also swallowed unrelated
    # errors raised inside the imported modules (and KeyboardInterrupt).
    import rnn_inference
    import sampler_inputs
    from transformer import INT_DTYPE, FLOAT_DTYPE
    import transformer_inference
class RandomSampler:
    """Implements random sampling with one or more models.
    Samples translations by randomly drawing one token at a time according to
    the probability distribution over the target vocabulary.
    If beam_size > 1, then multiple translations are sampled for each input
    sentence. Unlike beam search, the translations are sampled independently
    of each other. ('beam_size' is a misnomer in this context, but it
    simplifies things if RandomSampler and BeamSearchSampler have a common
    interface.)
    If there are multiple models, then at each timestep the next token is
    sampled according to the sum of the models' log probabilities.
    Prior to running the sampler, the placeholders in self.inputs must be
    fed appropriate values (see the SamplerInputs class). Model inputs are fed
    to the model placeholders, as in training.
    The resulting sample can be accessed via the outputs() property, which
    returns a pair of tensors, (sequences, scores). sequences contains target
    vocabulary IDs and has shape (batch_size_x, beam_size, seq_len), where
    seq_len <= max_translation_len is the length of the longest translation.
    scores contains floats representing the length-normalized log
    probabilities. It has the shape (batch_size_x, beam_size).
    TODO Make beam_size a placeholder?
    See also: BeamSearchSampler.
    """
    def __init__(self, models, configs, beam_size):
        """Sets some things up then calls _random_sample() to do the real work.
        Args:
            models: a sequence of RNN or Transformer objects.
            configs: a sequence of model configs (argparse.Namespace objects).
            beam_size: integer specifying the beam width.
        """
        self._models = models
        self._configs = configs
        self._beam_size = beam_size
        with tf.compat.v1.name_scope('random_sampler'):
            # Define placeholders.
            self.inputs = sampler_inputs.SamplerInputs()
            # Create an adapter to get a consistent interface to
            # Transformer and RNN models.
            model_adapters = []
            for i, (model, config) in enumerate(zip(models, configs)):
                with tf.compat.v1.name_scope('model_adapter_{}'.format(i)) as scope:
                    if config.model_type == 'transformer':
                        adapter = transformer_inference.ModelAdapter(
                            model, config, scope)
                    else:
                        # Only 'transformer' and 'rnn' model types exist.
                        assert config.model_type == 'rnn'
                        adapter = rnn_inference.ModelAdapter(
                            model, config, scope)
                    model_adapters.append(adapter)
            # Build the graph to do the actual work.
            sequences, scores = _random_sample(
                model_adapters=model_adapters,
                beam_size=beam_size,
                batch_size_x=self.inputs.batch_size_x,
                max_translation_len=self.inputs.max_translation_len,
                normalization_alpha=self.inputs.normalization_alpha,
                # <EOS> is assumed to have vocabulary ID 0 throughout this
                # module.
                eos_id=0)
            self._outputs = sequences, scores
    @property
    def outputs(self):
        # (sequences, scores) pair produced by _random_sample().
        return self._outputs
    @property
    def models(self):
        return self._models
    @property
    def configs(self):
        return self._configs
    @property
    def beam_size(self):
        return self._beam_size
def _random_sample(model_adapters, beam_size, batch_size_x,
                   max_translation_len, normalization_alpha, eos_id):
    """See description for RandomSampler above.
    Args:
        model_adapters: sequence of ModelAdapter objects.
        beam_size: integer specifying beam width.
        batch_size_x: tf.int32 scalar specifying number of input sentences.
        max_translation_len: tf.int32 scalar specifying max translation length.
        normalization_alpha: tf.float32 scalar specifying alpha parameter for
            length normalization.
        eos_id: integer specifying the vocabulary ID of the EOS symbol.
    Returns:
        A pair of tensors: (sequences, scores). sequences contains vocabulary
        IDs. It has shape (batch_size, len), where len <= max_translation_len
        is the length of the longest translation in the batch. scores contains
        sequence scores, which are summed probabilities.
    """
    # Encode the input and generate a 1-step decoding function for each model.
    decoding_functions = []
    for adapter in model_adapters:
        encoder_output = adapter.encode()
        func = adapter.generate_decoding_function(encoder_output)
        decoding_functions.append(func)
    # Initialize the timestep counter.
    current_time_step = tf.constant(1)
    # Initialize sequences with <GO>.
    sequences = tf.ones([batch_size_x*beam_size, 1], dtype=INT_DTYPE)
    # Initialize sequence scores.
    scores = tf.zeros([batch_size_x*beam_size], dtype=FLOAT_DTYPE)
    # Flags indicating which sequences are finished.
    finished = tf.fill([batch_size_x*beam_size], False)
    # Initialize memories (i.e. states carried over from the last timestep).
    memories = [ma.generate_initial_memories(batch_size_x, beam_size)
                for ma in model_adapters]
    # Generate the conditional and body functions for the sampling loop.
    loop_cond = _generate_while_loop_cond_func(max_translation_len)
    loop_body = _generate_while_loop_body_func(model_adapters,
                                               decoding_functions,
                                               batch_size_x, beam_size, eos_id)
    loop_vars = [current_time_step, sequences, scores, memories, finished]
    # Shape invariants allow sequences to grow along the time axis inside
    # the while-loop.
    shape_invariants=[
        tf.TensorShape([]),                   # timestep
        tf.TensorShape([None, None]),         # sequences
        tf.TensorShape([None]),               # scores
        [adapter.get_memory_invariants(mems)  # memories
         for adapter, mems in zip(model_adapters, memories)],
        tf.TensorShape([None])]               # finished
    # stop_gradient: sampling is inference-only, so block gradient flow
    # through every loop output.
    _, sequences, scores, _, _ = \
        tf.nest.map_structure(tf.stop_gradient, tf.while_loop(cond=loop_cond,
                                                              body=loop_body,
                                                              loop_vars=loop_vars,
                                                              shape_invariants=shape_invariants,
                                                              parallel_iterations=10,
                                                              swap_memory=False))
    # Truncate sequences to remove leading <GO> tokens.
    sequences = sequences[:, 1:]
    # Normalize scores. Note that we include the <EOS> token when calculating
    # sequence length.
    seq_len = tf.shape(input=sequences)[1]
    indices = tf.range(seq_len, dtype=tf.int32)
    indices = tf.tile(tf.expand_dims(indices, 0), [batch_size_x*beam_size, 1])
    seq_lens = tf.expand_dims(tf.expand_dims(seq_len, 0), 0)
    seq_lens = tf.tile(seq_lens, [batch_size_x*beam_size, seq_len])
    # Per row: position of each <EOS> token, or seq_len where there is none;
    # the row minimum (+1) is therefore the length up to the first <EOS>.
    eos_indices = tf.compat.v1.where(tf.equal(sequences, eos_id), indices, seq_lens)
    lengths = tf.reduce_min(input_tensor=eos_indices+1, axis=1)
    float_lengths = tf.cast(lengths, dtype=tf.float32)
    # Length penalty len ** alpha; alpha == 0 disables normalization.
    length_penalties = float_lengths ** normalization_alpha
    scores = scores / length_penalties
    # Reshape / transpose to group translations and scores by input sentence.
    sequences = tf.reshape(sequences, [beam_size, batch_size_x, seq_len])
    sequences = tf.transpose(a=sequences, perm=[1,0,2])
    scores = tf.reshape(scores, [beam_size, batch_size_x])
    scores = tf.transpose(a=scores, perm=[1,0])
    return sequences, scores
def _generate_while_loop_cond_func(max_translation_len):
    """Build the condition function for the sampling while-loop.

    The loop keeps running while the step counter is below
    max_translation_len and at least one sequence is still unfinished.
    """
    def continue_decoding(current_time_step, sequences, scores, memories,
                          finished):
        below_length_limit = tf.less(current_time_step, max_translation_len)
        some_still_active = tf.logical_not(
            tf.reduce_all(input_tensor=finished))
        return tf.logical_and(below_length_limit, some_still_active)
    return continue_decoding
def _generate_while_loop_body_func(model_adapters, decoding_functions,
                                   batch_size_x, beam_size, eos_id):
    """Build the body function for the sampling while-loop (one decode step)."""
    def decoding_step(current_time_step, sequences, scores, memories,
                      finished):
        # Get the target vocabulary IDs for this time step.
        step_ids = sequences[:, -1]
        # Calculate next token probabilities for each model and sum them.
        sum_log_probs = None
        for i in range(len(model_adapters)):
            # Propagate through decoder.
            step_logits, memories[i] = decoding_functions[i](
                step_ids, current_time_step, memories[i])
            # Adjust sampling temperature.
            step_logits = model_adapters[i].model.sampling_utils.adjust_logits(
                step_logits)
            # Calculate log probs for all possible tokens at current time-step.
            model_log_probs = tf.nn.log_softmax(step_logits)
            # Add to summed log probs.
            if sum_log_probs is None:
                sum_log_probs = model_log_probs
            else:
                sum_log_probs += model_log_probs
        # Determine the next token to be added to each sequence.
        next_ids = tf.squeeze(tf.random.categorical(logits=sum_log_probs, num_samples=1,
                                                    dtype=INT_DTYPE),
                              axis=1)
        # Collect scores associated with the selected tokens.
        seq_indices = tf.range(batch_size_x * beam_size, dtype=INT_DTYPE)
        score_coordinates = tf.stack([seq_indices, next_ids], axis=1)
        increments = tf.gather_nd(sum_log_probs, score_coordinates)
        # Add scores to cumulative scores, except for sequences that were
        # already completed before this timestep.
        scores += tf.compat.v1.where(tf.logical_not(finished),
                                     increments,
                                     tf.zeros([batch_size_x * beam_size]))
        # Extend each sequence with the next token.
        sequences = tf.concat([sequences, tf.expand_dims(next_ids, 1)], 1)
        # Check if sequences have been finished (with a <EOS> token).
        # NOTE(review): the product test below equals eos_id only when some
        # token in the row equals eos_id AND eos_id == 0 (as passed in by
        # RandomSampler); it is not a general "contains <EOS>" test for a
        # nonzero eos_id -- confirm before reusing with other vocabularies.
        finished |= tf.equal(tf.reduce_prod(input_tensor=sequences - eos_id, axis=1),
                             eos_id)
        return current_time_step+1, sequences, scores, memories, finished
    return decoding_step
|
{
"content_hash": "e272d7f17ded7cb96e4d35ad728404fd",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 88,
"avg_line_length": 40.17037037037037,
"alnum_prop": 0.6254840494191407,
"repo_name": "EdinburghNLP/nematus",
"id": "1f9d8e596ca8b99c7fccd85a5fbffe1d7a45d41e",
"size": "10846",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nematus/random_sampler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Emacs Lisp",
"bytes": "17034"
},
{
"name": "JavaScript",
"bytes": "17029"
},
{
"name": "NewLisp",
"bytes": "1582"
},
{
"name": "PHP",
"bytes": "10635"
},
{
"name": "Perl",
"bytes": "29347"
},
{
"name": "Python",
"bytes": "444994"
},
{
"name": "Ruby",
"bytes": "1649"
},
{
"name": "Shell",
"bytes": "5819"
},
{
"name": "Slash",
"bytes": "356"
},
{
"name": "Smalltalk",
"bytes": "1892"
},
{
"name": "SystemVerilog",
"bytes": "184"
}
],
"symlink_target": ""
}
|
import os, sys
_open = open  # rename for internal use -- gets redefined below


def open(dbname, mode="r"):
    """Open the Mindy database stored in directory *dbname*.

    Reads the first line of ``config.dat`` to decide which index backend
    (BerkeleyDB or flat-file) should load the database, then dispatches to
    that backend's ``open``.

    Raises:
        TypeError: if the index type recorded in ``config.dat`` is not
            recognized.
    """
    config_path = os.path.join(dbname, "config.dat")
    # Use a context manager so the handle is closed promptly (the original
    # leaked it), and decode the bytes so the string comparisons below work
    # on Python 3 as well as Python 2 (where bytes.split("\n") would raise).
    with _open(config_path, "rb") as config_file:
        line = config_file.read().decode("latin-1").split("\n")[0]
    if line == "index\tBerkeleyDB/1":
        import BerkeleyDB
        return BerkeleyDB.open(dbname, mode)
    elif line == "index\tflat/1":
        import FlatDB
        return FlatDB.open(dbname, mode)
    raise TypeError("Unknown index type: %r" % (line,))
def main():
    """Build a demo XPath index over a local SwissProt flat file.

    NOTE(review): the input paths below point into a developer's home
    directory, so this is scratch/demo code rather than a general entry
    point -- confirm before relying on it.
    """
    from Bio import Std
    import XPath
    import FlatDB
    # Index the "entry" namespace as primary, plus accession numbers found
    # via the Std.dbid XPath pattern.
    XPath.xpath_index(
        #dbname = "sprot_flat",
        dbname = "sprot_small",
        filenames = ["/home/dalke/ftps/swissprot/smaller_sprot38.dat",
        #filenames = ["/home/dalke/ftps/swissprot/sprot38.dat",
                     ],
        primary_namespace = "entry",
        extract_info = [
            ("entry", "//entry_name"),
            ("accession", "//%s[@type='accession']" % (Std.dbid.tag,)),
            ],
        #creator_factory = FlatDB.CreateFlatDB,
        )
if __name__ == "__main__":
    main()
|
{
"content_hash": "edd9f771d26dad7b222d5b9731315ff6",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 70,
"avg_line_length": 28.105263157894736,
"alnum_prop": 0.5580524344569289,
"repo_name": "dbmi-pitt/DIKB-Micropublication",
"id": "d5ceb1f4a1c9a9eb9d84473d23fc0c448612d7f1",
"size": "1068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/mp-scripts/Bio/Mindy/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3682138"
},
{
"name": "R",
"bytes": "4656"
},
{
"name": "Shell",
"bytes": "786"
}
],
"symlink_target": ""
}
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
from openmdao.api import Problem
from wakeexchange.OptimizationGroups import OptAEP
from wakeexchange.gauss import gauss_wrapper, add_gauss_params_IndepVarComps
from wakeexchange.floris import floris_wrapper, add_floris_params_IndepVarComps
if __name__ == "__main__":
    # Two-turbine comparison of the Gaussian wake model against FLORIS,
    # validated against SOWFA reference data (ICOWES case, D = 126.4 m).
    nTurbines = 2
    nDirections = 1
    rotorDiameter = 126.4
    rotorArea = np.pi*rotorDiameter*rotorDiameter/4.0
    axialInduction = 1.0/3.0
    CP = 0.7737/0.944 * 4.0 * 1.0/3.0 * np.power((1 - 1.0/3.0), 2)
    # CP =0.768 * 4.0 * 1.0/3.0 * np.power((1 - 1.0/3.0), 2)
    CT = 4.0*axialInduction*(1.0-axialInduction)
    generator_efficiency = 0.944
    # Define turbine characteristics
    axialInduction = np.array([axialInduction, axialInduction])
    rotorDiameter = np.array([rotorDiameter, rotorDiameter])
    generatorEfficiency = np.array([generator_efficiency, generator_efficiency])
    yaw = np.array([0., 0.])
    # Define site measurements
    wind_direction = 270.-0.523599*180./np.pi
    wind_speed = 8.  # m/s
    air_density = 1.1716
    Ct = np.array([CT, CT])
    Cp = np.array([CP, CP])
    # Build one OpenMDAO problem per wake model with identical settings.
    gauss_prob = Problem(root=OptAEP(nTurbines=nTurbines, nDirections=nDirections, use_rotor_components=False,
                                     wake_model=gauss_wrapper, wake_model_options={'nSamples': 0}, datasize=0,
                                     params_IdepVar_func=add_gauss_params_IndepVarComps,
                                     params_IndepVar_args={}))
    floris_prob = Problem(root=OptAEP(nTurbines=nTurbines, nDirections=nDirections, use_rotor_components=False,
                                      wake_model=floris_wrapper, wake_model_options=None, datasize=0,
                                      params_IdepVar_func=add_floris_params_IndepVarComps,
                                      params_IndepVar_args={}))
    probs = [gauss_prob, floris_prob]
    # Feed both problems the same layout and inflow conditions.
    for prob in probs:
        prob.setup()
        turbineX = np.array([1118.1, 1881.9])
        turbineY = np.array([1279.5, 1720.5])
        prob['turbineX'] = turbineX
        prob['turbineY'] = turbineY
        prob['rotorDiameter'] = rotorDiameter
        prob['axialInduction'] = axialInduction
        prob['generatorEfficiency'] = generatorEfficiency
        prob['air_density'] = air_density
        prob['Cp_in'] = Cp
        prob['Ct_in'] = Ct
        prob['windSpeeds'] = np.array([wind_speed])
        prob['windDirections'] = np.array([wind_direction])
    # Tuning-parameter history for the Gaussian model; only the uncommented
    # set further below is active.
    # gauss_prob['model_params:ke'] = 0.052
    # gauss_prob['model_params:spread_angle'] = 6.
    # gauss_prob['model_params:rotation_offset_angle'] = 2.0
    # for axialInd calc only
    # gauss_prob['model_params:ke'] = 0.050688
    # gauss_prob['model_params:spread_angle'] = 7.562716
    # gauss_prob['model_params:rotation_offset_angle'] = 3.336568
    # for axialInd and inflow adjust
    # gauss_prob['model_params:ke'] = 0.052333
    # gauss_prob['model_params:spread_angle'] = 8.111330
    # gauss_prob['model_params:rotation_offset_angle'] = 2.770265
    # for inflow adjust only
    # gauss_prob['model_params:ke'] = 0.052230
    # gauss_prob['model_params:spread_angle'] = 6.368191
    # gauss_prob['model_params:rotation_offset_angle'] = 1.855112
    # for added n_st_dev param #1
    # gauss_prob['model_params:ke'] = 0.050755
    # gauss_prob['model_params:spread_angle'] = 11.205766#*0.97
    # gauss_prob['model_params:rotation_offset_angle'] = 3.651790
    # gauss_prob['model_params:n_std_dev'] = 9.304371
    # for added n_st_dev param #2
    # gauss_prob['model_params:ke'] = 0.051010
    # gauss_prob['model_params:spread_angle'] = 11.779591
    # gauss_prob['model_params:rotation_offset_angle'] = 3.564547
    # gauss_prob['model_params:n_std_dev'] = 9.575505
    # for decoupled ky with n_std_dev = 4
    # gauss_prob['model_params:ke'] = 0.051145
    # gauss_prob['model_params:spread_angle'] = 2.617982
    # gauss_prob['model_params:rotation_offset_angle'] = 3.616082
    # gauss_prob['model_params:ky'] = 0.211496
    # for integrating for decoupled ky with n_std_dev = 4, linear, integrating
    # gauss_prob['model_params:ke'] = 0.016969
    # gauss_prob['model_params:spread_angle'] = 0.655430
    # gauss_prob['model_params:rotation_offset_angle'] = 3.615754
    # gauss_prob['model_params:ky'] = 0.195392
    # for integrating for decoupled ky with n_std_dev = 4, linear, integrating
    # gauss_prob['model_params:ke'] = 0.008858
    # gauss_prob['model_params:spread_angle'] = 0.000000
    # gauss_prob['model_params:rotation_offset_angle'] = 4.035276
    # gauss_prob['model_params:ky'] = 0.199385
    # for decoupled ke with n_std_dev=4, linear, not integrating
    # gauss_prob['model_params:ke'] = 0.051190
    # gauss_prob['model_params:spread_angle'] = 2.619202
    # gauss_prob['model_params:rotation_offset_angle'] = 3.629337
    # gauss_prob['model_params:ky'] = 0.211567
    # for decoupled ky with n_std_dev = 4, error = 1332.49, not integrating, power law
    gauss_prob['model_params:ke'] = 0.051360
    gauss_prob['model_params:rotation_offset_angle'] = 3.197348
    gauss_prob['model_params:Dw0'] = 1.804024
    gauss_prob['model_params:m'] = 0.0
    # for decoupled ky with n_std_dev = 4, error = 1630.8, with integrating, power law
    # gauss_prob['model_params:ke'] = 0.033165
    # gauss_prob['model_params:rotation_offset_angle'] = 3.328051
    # gauss_prob['model_params:Dw0'] = 1.708328
    # gauss_prob['model_params:m'] = 0.0
    # for decoupled ky with n_std_dev = 4, error = 1140.59, not integrating, power law for expansion,
    # linear for yaw
    # gauss_prob['model_params:ke'] = 0.050741
    # gauss_prob['model_params:rotation_offset_angle'] = 3.628737
    # gauss_prob['model_params:Dw0'] = 0.846582
    # gauss_prob['model_params:ky'] = 0.207734
    # for decoupled ky with n_std_dev = 4, error = 1058.73, integrating, power law for expansion,
    # linear for yaw
    # gauss_prob['model_params:ke'] = 0.016129
    # gauss_prob['model_params:rotation_offset_angle'] = 3.644356
    # gauss_prob['model_params:Dw0'] = 0.602132
    # gauss_prob['model_params:ky'] = 0.191178
    gauss_prob['model_params:integrate'] = False
    gauss_prob['model_params:spread_mode'] = 'power'
    gauss_prob['model_params:n_std_dev'] = 4
    # Load SOWFA reference results for the yaw and position sweeps.
    ICOWESdata = loadmat('../data/YawPosResults.mat')
    yawrange = ICOWESdata['yaw'][0]
    GaussianPower = list()
    FlorisPower = list()
    # Crude timing comparison: 100 repeated runs of each model.
    import time
    t1 = time.time()
    for i in range(0, 100):
        gauss_prob.run()
    t2 = time.time()
    for i in range(0, 100):
        floris_prob.run()
    t3 = time.time()
    # gauss time: 0.0580031871796
    # floris time: 0.10697388649
    print 'gauss time: ', t2-t1
    print 'floris time: ', t3-t2
    # quit()
    # Sweep upstream-turbine yaw and record per-turbine power for both models.
    for yaw1 in yawrange:
        for prob in probs:
            prob['yaw0'] = np.array([yaw1, 0.0])
            prob.run()
        GaussianPower.append(list(gauss_prob['wtPower0']))
        FlorisPower.append(list(floris_prob['wtPower0']))
    GaussianPower = np.array(GaussianPower)
    FlorisPower = np.array(FlorisPower)
    # print FlorisPower
    SOWFApower = np.array([ICOWESdata['yawPowerT1'][0], ICOWESdata['yawPowerT2'][0]]).transpose()/1000.
    fig, axes = plt.subplots(ncols=2, nrows=2, sharey=False)
    power_scalar = 1E-3
    # Upper-left subplot: power vs. yaw (FLORIS solid blue, SOWFA markers,
    # Gaussian dashed red, totals in black).
    axes[0, 0].plot(yawrange.transpose(), FlorisPower[:, 0]*power_scalar, 'b', yawrange.transpose(), SOWFApower[:, 0]*power_scalar, 'o', mec='b', mfc='none')
    axes[0, 0].plot(yawrange.transpose(), FlorisPower[:, 1]*power_scalar, 'b', yawrange.transpose(), SOWFApower[:, 1]*power_scalar, '^', mec='b', mfc='none')
    axes[0, 0].plot(yawrange.transpose(), FlorisPower[:, 0]*power_scalar+FlorisPower[:, 1]*power_scalar, '-k', yawrange.transpose(), SOWFApower[:, 0]*power_scalar
                    + SOWFApower[:, 1]*power_scalar, 'ko')
    axes[0, 0].plot(yawrange.transpose(), GaussianPower[:, 0]*power_scalar, '--r')
    axes[0, 0].plot(yawrange.transpose(), GaussianPower[:, 1]*power_scalar, '--r')
    axes[0, 0].plot(yawrange.transpose(), GaussianPower[:, 0]*power_scalar+GaussianPower[:, 1]*power_scalar, '--k')
    axes[0, 0].set_xlabel('yaw angle (deg.)')
    axes[0, 0].set_ylabel('Power (MW)')
    # error_turbine2 = np.sum(np.abs(FLORISpower[:, 1] - SOWFApower[:, 1]))
    # Sweep the downstream turbine's crosswind offset (rotated into the
    # inflow frame) and record power again.
    posrange = ICOWESdata['pos'][0]
    for prob in probs:
        prob['yaw0'] = np.array([0.0, 0.0])
    GaussianPower = list()
    FlorisPower = list()
    for pos2 in posrange:
        # Define turbine locations and orientation
        effUdXY = 0.523599
        Xinit = np.array([1118.1, 1881.9])
        Yinit = np.array([1279.5, 1720.5])
        XY = np.array([Xinit, Yinit]) + np.dot(np.array([[np.cos(effUdXY), -np.sin(effUdXY)],
                                                         [np.sin(effUdXY), np.cos(effUdXY)]]),
                                               np.array([[0., 0], [0, pos2]]))
        for prob in probs:
            prob['turbineX'] = XY[0, :]
            prob['turbineY'] = XY[1, :]
            prob.run()
        GaussianPower.append(list(gauss_prob['wtPower0']))
        FlorisPower.append(list(floris_prob['wtPower0']))
    GaussianPower = np.array(GaussianPower)
    FlorisPower = np.array(FlorisPower)
    SOWFApower = np.array([ICOWESdata['posPowerT1'][0], ICOWESdata['posPowerT2'][0]]).transpose()/1000.
    # print error_turbine2
    # Upper-right subplot: power vs. crosswind position (normalized by D).
    axes[0, 1].plot(posrange/rotorDiameter[0], FlorisPower[:, 0]*power_scalar, 'b', posrange/rotorDiameter[0], SOWFApower[:, 0]*power_scalar, 'o', mec='b', mfc='none')
    axes[0, 1].plot(posrange/rotorDiameter[0], FlorisPower[:, 1]*power_scalar, 'b', posrange/rotorDiameter[0], SOWFApower[:, 1]*power_scalar, '^', mec='b', mfc='none')
    axes[0, 1].plot(posrange/rotorDiameter[0], FlorisPower[:, 0]*power_scalar+FlorisPower[:, 1]*power_scalar, 'k-', posrange/rotorDiameter[0], SOWFApower[:, 0]*power_scalar+SOWFApower[:, 1]*power_scalar, 'ko')
    axes[0, 1].plot(posrange/rotorDiameter[0], GaussianPower[:, 0]*power_scalar, '--r')
    axes[0, 1].plot(posrange/rotorDiameter[0], GaussianPower[:, 1]*power_scalar, '--r')
    axes[0, 1].plot(posrange/rotorDiameter[0], GaussianPower[:, 0]*power_scalar+GaussianPower[:, 1]*power_scalar, '--k')
    axes[0, 1].set_xlabel('y/D')
    axes[0, 1].set_ylabel('Power (MW)')
    # Crosswind velocity transect at the downstream rotor, 7D behind the
    # upstream turbine.
    posrange = np.linspace(-3.*rotorDiameter[0], 3.*rotorDiameter[0], num=1000)
    for prob in probs:
        prob['yaw0'] = np.array([0.0, 0.0])
        prob['windDirections'] = np.array([270.])
        prob['turbineX'] = np.array([0, 7.*rotorDiameter[0]])
    GaussianVelocity = list()
    FlorisVelocity = list()
    for pos2 in posrange:
        for prob in probs:
            prob['turbineY'] = np.array([0, pos2])
            prob.run()
        GaussianVelocity.append(list(gauss_prob['wtVelocity0']))
        FlorisVelocity.append(list(floris_prob['wtVelocity0']))
    FlorisVelocity = np.array(FlorisVelocity)
    GaussianVelocity = np.array(GaussianVelocity)
    # Lower-left subplot: crosswind velocity profile.
    axes[1, 0].plot(posrange/rotorDiameter[0], FlorisVelocity[:, 1], 'b', label='Floris')
    axes[1, 0].plot(posrange/rotorDiameter[0], GaussianVelocity[:, 1], '--r', label='Gaussian')
    axes[1, 0].set_ylim([6.0, 8.5])
    axes[1, 0].set_xlim([-3.0, 3.0])
    axes[1, 0].set_xlabel('y/D')
    axes[1, 0].set_ylabel('Velocity (m/s)')
    # plt.legend()
    # plt.show()
    # Streamwise velocity along the wake centerline, out to 30D downstream.
    posrange = np.linspace(-1.*rotorDiameter[0], 30.*rotorDiameter[0], num=2000)
    for prob in probs:
        prob['turbineY'] = np.array([0, 0])
    GaussianVelocity = list()
    FlorisVelocity = list()
    for pos2 in posrange:
        for prob in probs:
            prob['turbineX'] = np.array([0, pos2])
            prob.run()
        GaussianVelocity.append(list(gauss_prob['wtVelocity0']))
        FlorisVelocity.append(list(floris_prob['wtVelocity0']))
    FlorisVelocity = np.array(FlorisVelocity)
    GaussianVelocity = np.array(GaussianVelocity)
    # Lower-right subplot: centerline velocity recovery, with the 7D tuning
    # location marked.
    axes[1, 1].plot(posrange/rotorDiameter[0], FlorisVelocity[:, 1], 'b', label='Floris')
    axes[1, 1].plot(posrange/rotorDiameter[0], GaussianVelocity[:, 1], '--r', label='Gaussian')
    axes[1, 1].plot(np.array([7.0, 7.0]), np.array([0.0, 9.0]), ':k', label='Tuning Point')
    plt.xlabel('x/D')
    plt.ylabel('Velocity (m/s)')
    plt.legend(loc=4)
    plt.show()
|
{
"content_hash": "01d2fd432e56dcb1e15945b624979216",
"timestamp": "",
"source": "github",
"line_count": 306,
"max_line_length": 209,
"avg_line_length": 40.1797385620915,
"alnum_prop": 0.6246441642944286,
"repo_name": "byuflowlab/gaussian-wake",
"id": "4464703548da2fc456c878cf85a011d470ef7ade",
"size": "12295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/compare.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "19153"
},
{
"name": "Fortran",
"bytes": "472411"
},
{
"name": "OpenEdge ABL",
"bytes": "152581"
},
{
"name": "Python",
"bytes": "142289"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drop the ``is_default`` column from ``Plan``."""

    def forwards(self, orm):
        """Apply the migration: remove ``physical_plan.is_default``."""
        # Deleting field 'Plan.is_default'
        db.delete_column(u'physical_plan', 'is_default')

    def backwards(self, orm):
        """Revert the migration: restore ``physical_plan.is_default``."""
        # Adding field 'Plan.is_default'
        # Existing rows get False; keep_default=False removes the
        # database-level default once the column has been added.
        db.add_column(u'physical_plan', 'is_default',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

    # NOTE: auto-generated by South from the app's models at migration
    # time; do not edit by hand.
    models = {
        u'physical.databaseinfra': {
            'Meta': {'object_name': 'DatabaseInfra'},
            'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
            'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
            'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
            'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        u'physical.diskoffering': {
            'Meta': {'object_name': 'DiskOffering'},
            'available_size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.engine': {
            'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
            'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
            'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
        },
        u'physical.enginetype': {
            'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.environment': {
            'Meta': {'object_name': 'Environment'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.host': {
            'Meta': {'object_name': 'Host'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.instance': {
            'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
            'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
            'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'port': ('django.db.models.fields.IntegerField', [], {}),
            'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.plan': {
            'Meta': {'object_name': 'Plan'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
            'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
            'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
            'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
            'flipperfox_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'flipperfox_migration_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
            'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'physical.planattribute': {
            'Meta': {'object_name': 'PlanAttribute'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'physical.replicationtopology': {
            'Meta': {'object_name': 'ReplicationTopology'},
            'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
            'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['physical']
|
{
"content_hash": "070e8354724a5ddb351c1e15dab6bdaa",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 239,
"avg_line_length": 88.10596026490066,
"alnum_prop": 0.5634395670475045,
"repo_name": "globocom/database-as-a-service",
"id": "52e867236fbcfd36793969aec62326225aabc590",
"size": "13328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbaas/physical/migrations/0040_auto__del_field_plan_is_default.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "243568"
},
{
"name": "Dockerfile",
"bytes": "1372"
},
{
"name": "HTML",
"bytes": "310401"
},
{
"name": "JavaScript",
"bytes": "988830"
},
{
"name": "Makefile",
"bytes": "5199"
},
{
"name": "Python",
"bytes": "9674426"
},
{
"name": "Shell",
"bytes": "215115"
}
],
"symlink_target": ""
}
|
from luma.oled.device import ssd1327
from luma.core.render import canvas
from luma.core.framebuffer import diff_to_previous, full_frame
from baseline_data import get_reference_data, primitives
from helpers import serial, assert_invalid_dimensions, setup_function # noqa: F401
from unittest.mock import call
def test_init_128x128():
    """
    SSD1327 OLED with a 128 x 128 resolution works correctly.
    """
    ssd1327(serial, framebuffer=full_frame())

    expected_commands = [
        call(174, 160, 83, 161, 0, 162, 0, 164, 168, 127),
        call(184, 1, 17, 34, 50, 67, 84, 101, 118),
        call(179, 0, 171, 1, 177, 241, 188, 8, 190, 7, 213, 98, 182, 15),
        call(129, 127),
        call(21, 0, 63, 117, 0, 127),
        call(175),
    ]
    serial.command.assert_has_calls(expected_commands)

    # Next 4096 are all data: zero's to clear the RAM
    # (4096 = 128 * 128 / 2)
    blank_buffer = [0] * (128 * 128 // 2)
    serial.data.assert_called_once_with(blank_buffer)
def test_init_invalid_dimensions():
    """
    SSD1327 OLED with an invalid resolution raises a
    :py:class:`luma.core.error.DeviceDisplayModeError`.
    """
    # 128 x 77 is not a resolution the ssd1327 driver accepts.
    assert_invalid_dimensions(ssd1327, serial, 128, 77)
def test_hide():
    """
    SSD1327 OLED screen content can be hidden.
    """
    oled = ssd1327(serial)
    # Discard the commands issued during initialisation so only the
    # hide() traffic is asserted on.
    serial.reset_mock()
    oled.hide()
    # hide() must emit exactly one command: 174 (0xAE).
    serial.command.assert_called_once_with(174)
def test_show():
    """
    SSD1327 OLED screen content can be displayed.
    """
    oled = ssd1327(serial)
    # Discard the commands issued during initialisation so only the
    # show() traffic is asserted on.
    serial.reset_mock()
    oled.show()
    # show() must emit exactly one command: 175 (0xAF).
    serial.command.assert_called_once_with(175)
def test_greyscale_display():
    """
    SSD1327 OLED screen can draw and display a greyscale image.
    """
    oled = ssd1327(serial, mode="RGB", framebuffer=full_frame())
    serial.reset_mock()

    # Render the shared demo primitives onto the device.
    with canvas(oled) as draw:
        primitives(oled, draw)

    # Initial command to reset the display
    serial.command.assert_called_once_with(21, 0, 63, 117, 0, 127)

    # To regenerate test data, uncomment the following (remember not to commit though)
    # ================================================================================
    # from baseline_data import save_reference_data
    # save_reference_data("demo_ssd1327_greyscale", serial.data.call_args.args[0])

    # The rendered frame must match the recorded baseline byte-for-byte.
    serial.data.assert_called_once_with(get_reference_data('demo_ssd1327_greyscale'))
def test_monochrome_display():
    """
    SSD1327 OLED screen can draw and display a monochrome image.
    """
    oled = ssd1327(serial, mode="1", framebuffer=full_frame())
    serial.reset_mock()

    # Render the shared demo primitives onto the device.
    with canvas(oled) as draw:
        primitives(oled, draw)

    # Initial command to reset the display
    serial.command.assert_called_once_with(21, 0, 63, 117, 0, 127)

    # To regenerate test data, uncomment the following (remember not to commit though)
    # ================================================================================
    # from baseline_data import save_reference_data
    # save_reference_data("demo_ssd1327_monochrome", serial.data.call_args.args[0])

    # The rendered frame must match the recorded baseline byte-for-byte.
    serial.data.assert_called_once_with(get_reference_data('demo_ssd1327_monochrome'))
def test_framebuffer_override():
    """
    Reproduce https://github.com/rm-hull/luma.examples/issues/95
    """
    # Regression test: constructing the device with an explicit
    # diff_to_previous framebuffer must not raise. No assertions needed.
    ssd1327(serial, mode="1", framebuffer=diff_to_previous())
|
{
"content_hash": "38eaf7e369c1cb753c9be094ca2871ec",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 86,
"avg_line_length": 32.691588785046726,
"alnum_prop": 0.6400800457404231,
"repo_name": "rm-hull/ssd1306",
"id": "1b677dd4993e85bff2ae6754aca41b8a84a7e80f",
"size": "3630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_ssd1327.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "133730"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import xml_models
import xml_models.rest_client as rest_client
from lxml import etree
from xml_models.xpath_finder import MultipleNodesReturnedException
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
class ModelManager(object):
    """
    Handles what can be queried for, and acts as the entry point for querying.

    The API and usage is intended to be familiar to Django users however it does not support the complete Django
    ModelManager API.
    """

    def __init__(self, model, finders):
        """
        :param model: the model class queries will instantiate
        :param finders: mapping of field-name tuples (strings or field
            objects exposing ``_name``) to URL patterns
        """
        self.model = model
        self.finders = {}
        self.headers = {}
        # Index each finder under a *sorted* tuple of its field names so a
        # lookup succeeds regardless of the order keyword arguments are
        # supplied in; the original field order is kept alongside the URL
        # for interpolating values into the pattern.
        for key, url in finders.items():
            field_names = [field if isinstance(field, str) else field._name
                           for field in key]
            self.finders[tuple(sorted(field_names))] = (url, field_names)

    def filter(self, **kw):
        """
        Filter models by key-value pairs.

        :Example:

        .. code-block:: python

            Model.objects.filter(attr1=value1,attr2=value2)

        How the actual HTTP request is handled is determined by :ref:`finders`.

        :param kw: key value pairs of field name and value
        :return: lazy query
        """
        return ModelQuery(self, self.model, headers=self.headers).filter(**kw)

    def all(self, **kw):
        """
        Get all models.

        :Example:

        .. code-block:: python

            Model.objects.all()

        How the actual HTTP request is handled is determined by :ref:`finders`.

        :param kw: optional key value pairs of field name and value
        :return: lazy query
        """
        return ModelQuery(self, self.model, headers=self.headers).filter(**kw)

    def filter_custom(self, url):
        """
        Set a URL to be called when querying

        :param url: full URL
        :return: lazy query
        """
        return ModelQuery(self, self.model, headers=self.headers).filter_custom(url)

    def count(self):
        """
        Get a count

        :return: int
        """
        return ModelQuery(self, self.model, headers=self.headers).count()

    def get(self, **kw):
        """
        Get a single object.

        This can be called directly with key-value pairs or after setting some ``filters``

        :param kw: key value pairs of field name and value
        :return: a single model instance
        """
        return ModelQuery(self, self.model, headers=self.headers).get(**kw)
# this is an internal class and should not be exposed to end users so we don't need docstrings
# pylint: disable=missing-docstring
class ModelQuery(object):
    """Lazy, chainable query over a remote XML collection.

    Created by :class:`ModelManager`; nothing is fetched until the query is
    iterated, counted or ``get`` is called.
    """

    def __init__(self, manager, model, headers=None):
        self.manager = manager
        self.model = model
        self.args = {}
        self.headers = headers or {}
        self.custom_url = None
        # When calling list(query) list will call __count__ before __iter__, both of which will call _fetch &
        # _fragments. We keep a cache of fetched URLs and parsed out fragments so as to prevent fetching and parsing
        # the tree twice.
        self.__fragment_cache = []
        self.__fetch_cache = {}

    def filter(self, **kw):
        """Merge field=value constraints into the query; returns self."""
        self.args.update(kw)
        return self

    def filter_custom(self, url):
        """Use *url* verbatim instead of a registered finder; returns self."""
        self.custom_url = url
        return self

    def count(self):
        response = self._fetch()
        return len(list(self._fragments(response.content)))

    def __iter__(self):
        response = self._fetch()
        for fragment in self._fragments(response.content):
            yield self.model(fragment)

    def __len__(self):
        return self.count()

    def get(self, **kw):
        """Fetch a single object matching the accumulated constraints.

        :raises DoesNotExist: empty response body or HTTP 404
        :raises MultipleNodesReturnedException: the collection node holds
            more than one child
        """
        self.args.update(kw)
        response = self._fetch()
        # An empty body and a 404 both mean "no such object".
        # (The original code repeated the empty-body check twice; the
        # second check was unreachable and has been removed.)
        if not response.content or response.response_code == 404:
            raise DoesNotExist(self.model, self.args)
        content = response.content
        node_to_find = getattr(self.model, 'collection_node', None)
        if node_to_find:
            tree = etree.fromstring(content)
            node = tree.find('.//' + node_to_find).getchildren()
            if len(node) > 1:
                raise MultipleNodesReturnedException
            content = etree.tostring(node[0])
        return self.model(content)

    def _fetch(self):
        # the caching here may be better handled with requests caching?
        url = self._find_query_path()
        if url not in self.__fetch_cache:
            self.__fetch_cache[url] = rest_client.Client("", verify=xml_models.VERIFY).GET(url, headers=self.headers)
        return self.__fetch_cache[url]

    def _fragments(self, xml):
        # Serve previously parsed fragments from the cache, if any.
        if self.__fragment_cache:
            for item in self.__fragment_cache:
                yield item
            return

        if not xml:
            raise DoesNotExist(self.model, self.args)

        xpath_to_find = getattr(self.model, 'collection_xpath', None)
        node_to_find = getattr(self.model, 'collection_node', None)
        if node_to_find:
            xpath_to_find = '//' + node_to_find

        if xpath_to_find:
            tree = etree.parse(StringIO(xml.encode()))
            for node in tree.xpath(xpath_to_find):
                if node.getchildren():
                    for n in node.getchildren():
                        yield etree.tostring(n)
                else:
                    yield etree.tostring(node)
            return

        # no collection node/xpath
        tree = etree.iterparse(StringIO(xml.encode()), ['start', 'end'])
        _, child = next(tree)  # assume there is a wrapper tag
        _, child = next(tree)  # this is the tag we care about
        node_name = child.tag
        for event, elem in tree:
            if event == 'end' and elem.tag == node_name:
                result = etree.tostring(elem)
                elem.clear()
                self.__fragment_cache.append(result)
                yield result

    def _find_query_path(self):
        # A custom URL bypasses the registered finders entirely.
        if self.custom_url:
            return self.custom_url
        # Finders are keyed by the sorted tuple of field names.
        key_tuple = tuple(sorted(self.args.keys()))
        try:
            (url, attrs) = self.manager.finders[key_tuple]
            return url % tuple([self.args[x] for x in attrs])
        except KeyError:
            raise NoRegisteredFinderError(str(key_tuple))
class NoRegisteredFinderError(Exception):
    """Raised when no finder URL is registered for the queried field set."""
    pass
class ValidationError(Exception):
    """Validation failure for model data.

    NOTE(review): not raised anywhere in this module; presumably used by
    callers elsewhere in the package — confirm before removing.
    """
    pass
class DoesNotExist(Exception):
    """Raised when a query matches no remote object."""

    def __init__(self, model, args):
        # Embed the model name and query args in the message for debugging.
        message = ("DoesNotExist: %s matching query %s does not exist"
                   % (model.__name__, str(args)))
        super(DoesNotExist, self).__init__(message)
|
{
"content_hash": "408d36d74c7dc51ab2dd49f3e47e4e94",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 117,
"avg_line_length": 30.877828054298643,
"alnum_prop": 0.5860199296600235,
"repo_name": "alephnullplex/xml_models2",
"id": "14a31a2bf9462272e6b89554ad40c21b39f30a45",
"size": "6824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xml_models/managers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "58547"
}
],
"symlink_target": ""
}
|
from migrate.changeset import UniqueConstraint
from migrate import ForeignKeyConstraint
from oslo_log import log as logging
from sqlalchemy import Boolean, BigInteger, Column, DateTime, Enum, Float
from sqlalchemy import dialects
from sqlalchemy import ForeignKey, Index, Integer, MetaData, String, Table
from sqlalchemy import Text
from sqlalchemy.types import NullType
from nova.i18n import _LE
LOG = logging.getLogger(__name__)
# Note on the autoincrement flag: this is defaulted for primary key columns
# of integral type, so is no longer set explicitly in such cases.
# NOTE(dprince): This wrapper allows us to easily match the Folsom MySQL
# Schema. In Folsom we created tables as latin1 and converted them to utf8
# later. This conversion causes some of the Text columns on MySQL to get
# created as mediumtext instead of just text.
def MediumText():
    """Text column that becomes MEDIUMTEXT on MySQL, plain Text elsewhere."""
    base = Text()
    return base.with_variant(dialects.mysql.MEDIUMTEXT(), 'mysql')
def Inet():
    """Address column: native INET on PostgreSQL, String(43) elsewhere."""
    base = String(length=43)
    return base.with_variant(dialects.postgresql.INET(), 'postgresql')
def InetSmall():
    """Address column: native INET on PostgreSQL, String(39) elsewhere."""
    base = String(length=39)
    return base.with_variant(dialects.postgresql.INET(), 'postgresql')
def _create_shadow_tables(migrate_engine):
    """Create a ``shadow_<name>`` copy of every table reflected from the DB.

    Each shadow table mirrors the live table's columns; creation failures
    are logged and re-raised.
    """
    meta = MetaData(migrate_engine)
    meta.reflect(migrate_engine)
    table_names = list(meta.tables.keys())

    meta.bind = migrate_engine

    for table_name in table_names:
        table = Table(table_name, meta, autoload=True)

        columns = []
        for column in table.columns:
            column_copy = None
            # NOTE(boris-42): BigInteger is not supported by sqlite, so
            #                 after copy it will have NullType, other
            #                 types that are used in Nova are supported by
            #                 sqlite.
            if isinstance(column.type, NullType):
                column_copy = Column(column.name, BigInteger(), default=0)
                if table_name == 'instances' and column.name == 'locked_by':
                    # NOTE(review): nested under the NullType branch, so
                    # instances.locked_by is assumed to reflect as NullType
                    # on this engine and is recreated as an Enum with a
                    # shadow-specific type name — confirm.
                    enum = Enum('owner', 'admin',
                                name='shadow_instances0locked_by')
                    column_copy = Column(column.name, enum)
            else:
                column_copy = column.copy()
            columns.append(column_copy)
        shadow_table_name = 'shadow_' + table_name
        shadow_table = Table(shadow_table_name, meta, *columns,
                             mysql_engine='InnoDB')
        try:
            shadow_table.create()
        except Exception:
            LOG.info(repr(shadow_table))
            LOG.exception(_LE('Exception while creating table.'))
            raise
# NOTE(dprince): we add these here so our schema contains dump tables
# which were added in migration 209 (in Havana). We can drop these in
# Icehouse: https://bugs.launchpad.net/nova/+bug/1266538
def _create_dump_tables(migrate_engine):
    """Create ``dump_<name>`` copies of a fixed set of reflected tables."""
    meta = MetaData(migrate_engine)
    meta.reflect(migrate_engine)

    table_names = ['compute_node_stats', 'compute_nodes', 'instance_actions',
                   'instance_actions_events', 'instance_faults', 'migrations']
    for table_name in table_names:
        source = Table(table_name, meta, autoload=True)

        # NOTE(dprince): The dump_ tables were originally created from an
        # earlier schema version so we don't want to add the pci_stats
        # column so that schema diffs are exactly the same.
        columns = [column.copy() for column in source.columns
                   if column.name != 'pci_stats']

        dump_table = Table('dump_' + source.name, meta, *columns,
                           mysql_engine='InnoDB')
        dump_table.create()
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
agent_builds = Table('agent_builds', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('hypervisor', String(length=255)),
Column('os', String(length=255)),
Column('architecture', String(length=255)),
Column('version', String(length=255)),
Column('url', String(length=255)),
Column('md5hash', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_hosts = Table('aggregate_hosts', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(length=255)),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregate_metadata = Table('aggregate_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('aggregate_id', Integer, ForeignKey('aggregates.id'),
nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
aggregates = Table('aggregates', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
block_device_mapping = Table('block_device_mapping', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('device_name', String(length=255), nullable=True),
Column('delete_on_termination', Boolean),
Column('snapshot_id', String(length=36), nullable=True),
Column('volume_id', String(length=36), nullable=True),
Column('volume_size', Integer),
Column('no_device', Boolean),
Column('connection_info', MediumText()),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
Column('source_type', String(length=255), nullable=True),
Column('destination_type', String(length=255), nullable=True),
Column('guest_format', String(length=255), nullable=True),
Column('device_type', String(length=255), nullable=True),
Column('disk_bus', String(length=255), nullable=True),
Column('boot_index', Integer),
Column('image_id', String(length=36), nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
bw_usage_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('start_period', DateTime, nullable=False),
Column('last_refreshed', DateTime),
Column('bw_in', BigInteger),
Column('bw_out', BigInteger),
Column('mac', String(length=255)),
Column('uuid', String(length=36)),
Column('last_ctr_in', BigInteger()),
Column('last_ctr_out', BigInteger()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
cells = Table('cells', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('api_url', String(length=255)),
Column('weight_offset', Float),
Column('weight_scale', Float),
Column('name', String(length=255)),
Column('is_parent', Boolean),
Column('deleted', Integer),
Column('transport_url', String(length=255), nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
certificates = Table('certificates', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('file_name', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
compute_node_stats = Table('compute_node_stats', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('compute_node_id', Integer, nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
compute_nodes = Table('compute_nodes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('service_id', Integer, nullable=False),
Column('vcpus', Integer, nullable=False),
Column('memory_mb', Integer, nullable=False),
Column('local_gb', Integer, nullable=False),
Column('vcpus_used', Integer, nullable=False),
Column('memory_mb_used', Integer, nullable=False),
Column('local_gb_used', Integer, nullable=False),
Column('hypervisor_type', MediumText(), nullable=False),
Column('hypervisor_version', Integer, nullable=False),
Column('cpu_info', MediumText(), nullable=False),
Column('disk_available_least', Integer),
Column('free_ram_mb', Integer),
Column('free_disk_gb', Integer),
Column('current_workload', Integer),
Column('running_vms', Integer),
Column('hypervisor_hostname', String(length=255)),
Column('deleted', Integer),
Column('host_ip', InetSmall()),
Column('supported_instances', Text),
Column('pci_stats', Text, nullable=True),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
console_pools = Table('console_pools', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('username', String(length=255)),
Column('password', String(length=255)),
Column('console_type', String(length=255)),
Column('public_hostname', String(length=255)),
Column('host', String(length=255)),
Column('compute_host', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
consoles_instance_uuid_column_args = ['instance_uuid', String(length=36)]
consoles_instance_uuid_column_args.append(
ForeignKey('instances.uuid', name='consoles_instance_uuid_fkey'))
consoles = Table('consoles', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_name', String(length=255)),
Column('password', String(length=255)),
Column('port', Integer),
Column('pool_id', Integer, ForeignKey('console_pools.id')),
Column(*consoles_instance_uuid_column_args),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
dns_domains = Table('dns_domains', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Boolean),
Column('domain', String(length=255), primary_key=True, nullable=False),
Column('scope', String(length=255)),
Column('availability_zone', String(length=255)),
Column('project_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
fixed_ips = Table('fixed_ips', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('network_id', Integer),
Column('allocated', Boolean),
Column('leased', Boolean),
Column('reserved', Boolean),
Column('virtual_interface_id', Integer),
Column('host', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
floating_ips = Table('floating_ips', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', InetSmall()),
Column('fixed_ip_id', Integer),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('auto_assigned', Boolean),
Column('pool', String(length=255)),
Column('interface', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_faults = Table('instance_faults', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_uuid', String(length=36)),
Column('code', Integer, nullable=False),
Column('message', String(length=255)),
Column('details', MediumText()),
Column('host', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_id_mappings = Table('instance_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_info_caches = Table('instance_info_caches', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('network_info', MediumText()),
Column('instance_uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
groups = Table('instance_groups', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('uuid', String(length=36), nullable=False),
Column('name', String(length=255)),
UniqueConstraint('uuid', 'deleted',
name='uniq_instance_groups0uuid0deleted'),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_metadata = Table('instance_group_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_policy = Table('instance_group_policy', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('policy', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
group_member = Table('instance_group_member', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_id', String(length=255)),
Column('group_id', Integer, ForeignKey('instance_groups.id'),
nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_metadata = Table('instance_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('instance_uuid', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_system_metadata = Table('instance_system_metadata', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_uuid', String(length=36), nullable=False),
Column('key', String(length=255), nullable=False),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_type_extra_specs = Table('instance_type_extra_specs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_type_id', Integer, ForeignKey('instance_types.id'),
nullable=False),
Column('key', String(length=255)),
Column('value', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_type_projects = Table('instance_type_projects', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('instance_type_id', Integer, nullable=False),
Column('project_id', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_types = Table('instance_types', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('name', String(length=255)),
Column('id', Integer, primary_key=True, nullable=False),
Column('memory_mb', Integer, nullable=False),
Column('vcpus', Integer, nullable=False),
Column('swap', Integer, nullable=False),
Column('vcpu_weight', Integer),
Column('flavorid', String(length=255)),
Column('rxtx_factor', Float),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
Column('disabled', Boolean),
Column('is_public', Boolean),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
inst_lock_enum = Enum('owner', 'admin', name='instances0locked_by')
instances = Table('instances', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('internal_id', Integer),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('image_ref', String(length=255)),
Column('kernel_id', String(length=255)),
Column('ramdisk_id', String(length=255)),
Column('launch_index', Integer),
Column('key_name', String(length=255)),
Column('key_data', MediumText()),
Column('power_state', Integer),
Column('vm_state', String(length=255)),
Column('memory_mb', Integer),
Column('vcpus', Integer),
Column('hostname', String(length=255)),
Column('host', String(length=255)),
Column('user_data', MediumText()),
Column('reservation_id', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
Column('terminated_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('availability_zone', String(length=255)),
Column('locked', Boolean),
Column('os_type', String(length=255)),
Column('launched_on', MediumText()),
Column('instance_type_id', Integer),
Column('vm_mode', String(length=255)),
Column('uuid', String(length=36)),
Column('architecture', String(length=255)),
Column('root_device_name', String(length=255)),
Column('access_ip_v4', InetSmall()),
Column('access_ip_v6', InetSmall()),
Column('config_drive', String(length=255)),
Column('task_state', String(length=255)),
Column('default_ephemeral_device', String(length=255)),
Column('default_swap_device', String(length=255)),
Column('progress', Integer),
Column('auto_disk_config', Boolean),
Column('shutdown_terminate', Boolean),
Column('disable_terminate', Boolean),
Column('root_gb', Integer),
Column('ephemeral_gb', Integer),
Column('cell_name', String(length=255)),
Column('node', String(length=255)),
Column('deleted', Integer),
Column('locked_by', inst_lock_enum),
Column('cleaned', Integer, default=0),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instance_actions = Table('instance_actions', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('action', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('request_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('start_time', DateTime),
Column('finish_time', DateTime),
Column('message', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
instance_actions_events = Table('instance_actions_events', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('event', String(length=255)),
Column('action_id', Integer, ForeignKey('instance_actions.id')),
Column('start_time', DateTime),
Column('finish_time', DateTime),
Column('result', String(length=255)),
Column('traceback', Text),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
iscsi_targets = Table('iscsi_targets', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('target_num', Integer),
Column('host', String(length=255)),
Column('volume_id', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
key_pairs = Table('key_pairs', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('user_id', String(length=255)),
Column('fingerprint', String(length=255)),
Column('public_key', MediumText()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
migrations = Table('migrations', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('source_compute', String(length=255)),
Column('dest_compute', String(length=255)),
Column('dest_host', String(length=255)),
Column('status', String(length=255)),
Column('instance_uuid', String(length=36)),
Column('old_instance_type_id', Integer),
Column('new_instance_type_id', Integer),
Column('source_node', String(length=255)),
Column('dest_node', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
networks = Table('networks', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('injected', Boolean),
Column('cidr', Inet()),
Column('netmask', InetSmall()),
Column('bridge', String(length=255)),
Column('gateway', InetSmall()),
Column('broadcast', InetSmall()),
Column('dns1', InetSmall()),
Column('vlan', Integer),
Column('vpn_public_address', InetSmall()),
Column('vpn_public_port', Integer),
Column('vpn_private_address', InetSmall()),
Column('dhcp_start', InetSmall()),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('cidr_v6', Inet()),
Column('gateway_v6', InetSmall()),
Column('label', String(length=255)),
Column('netmask_v6', InetSmall()),
Column('bridge_interface', String(length=255)),
Column('multi_host', Boolean),
Column('dns2', InetSmall()),
Column('uuid', String(length=36)),
Column('priority', Integer),
Column('rxtx_base', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
pci_devices_uc_name = 'uniq_pci_devices0compute_node_id0address0deleted'
pci_devices = Table('pci_devices', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Integer, default=0, nullable=False),
Column('id', Integer, primary_key=True),
Column('compute_node_id', Integer, nullable=False),
Column('address', String(12), nullable=False),
Column('product_id', String(4)),
Column('vendor_id', String(4)),
Column('dev_type', String(8)),
Column('dev_id', String(255)),
Column('label', String(255), nullable=False),
Column('status', String(36), nullable=False),
Column('extra_info', Text, nullable=True),
Column('instance_uuid', String(36), nullable=True),
Index('ix_pci_devices_compute_node_id_deleted',
'compute_node_id', 'deleted'),
Index('ix_pci_devices_instance_uuid_deleted',
'instance_uuid', 'deleted'),
UniqueConstraint('compute_node_id',
'address', 'deleted',
name=pci_devices_uc_name),
mysql_engine='InnoDB',
mysql_charset='utf8')
provider_fw_rules = Table('provider_fw_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('protocol', String(length=5)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_classes = Table('quota_classes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('class_name', String(length=255)),
Column('resource', String(length=255)),
Column('hard_limit', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quota_usages = Table('quota_usages', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('project_id', String(length=255)),
Column('resource', String(length=255)),
Column('in_use', Integer, nullable=False),
Column('reserved', Integer, nullable=False),
Column('until_refresh', Integer),
Column('deleted', Integer),
Column('user_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
quotas = Table('quotas', meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('project_id', String(length=255)),
Column('resource', String(length=255), nullable=False),
Column('hard_limit', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
uniq_name = "uniq_project_user_quotas0user_id0project_id0resource0deleted"
project_user_quotas = Table('project_user_quotas', meta,
Column('id', Integer, primary_key=True,
nullable=False),
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer),
Column('user_id',
String(length=255),
nullable=False),
Column('project_id',
String(length=255),
nullable=False),
Column('resource',
String(length=255),
nullable=False),
Column('hard_limit', Integer, nullable=True),
UniqueConstraint('user_id', 'project_id', 'resource',
'deleted', name=uniq_name),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
reservations = Table('reservations', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('usage_id', Integer, nullable=False),
Column('project_id', String(length=255)),
Column('resource', String(length=255)),
Column('delta', Integer, nullable=False),
Column('expire', DateTime),
Column('deleted', Integer),
Column('user_id', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
s3_images = Table('s3_images', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_instance_association = \
Table('security_group_instance_association', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('security_group_id', Integer),
Column('instance_uuid', String(length=36)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_rules = Table('security_group_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('parent_group_id', Integer, ForeignKey('security_groups.id')),
Column('protocol', String(length=255)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
Column('group_id', Integer, ForeignKey('security_groups.id')),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_groups = Table('security_groups', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('name', String(length=255)),
Column('description', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
security_group_default_rules = Table('security_group_default_rules', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('deleted', Integer, default=0),
Column('id', Integer, primary_key=True, nullable=False),
Column('protocol', String(length=5)),
Column('from_port', Integer),
Column('to_port', Integer),
Column('cidr', Inet()),
mysql_engine='InnoDB',
mysql_charset='utf8',
)
services = Table('services', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('host', String(length=255)),
Column('binary', String(length=255)),
Column('topic', String(length=255)),
Column('report_count', Integer, nullable=False),
Column('disabled', Boolean),
Column('deleted', Integer),
Column('disabled_reason', String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshot_id_mappings = Table('snapshot_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
snapshots = Table('snapshots', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', String(length=36), primary_key=True, nullable=False),
Column('volume_id', String(length=36), nullable=False),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('status', String(length=255)),
Column('progress', String(length=255)),
Column('volume_size', Integer),
Column('scheduled_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('deleted', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
task_log = Table('task_log', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('task_name', String(length=255), nullable=False),
Column('state', String(length=255), nullable=False),
Column('host', String(length=255), nullable=False),
Column('period_beginning', DateTime, nullable=False),
Column('period_ending', DateTime, nullable=False),
Column('message', String(length=255), nullable=False),
Column('task_items', Integer),
Column('errors', Integer),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
virtual_interfaces = Table('virtual_interfaces', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('address', String(length=255)),
Column('network_id', Integer),
Column('uuid', String(length=36)),
Column('instance_uuid', String(length=36), nullable=True),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_id_mappings = Table('volume_id_mappings', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', Integer, primary_key=True, nullable=False),
Column('uuid', String(length=36), nullable=False),
Column('deleted', Integer),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volumes = Table('volumes', meta,
Column('created_at', DateTime),
Column('updated_at', DateTime),
Column('deleted_at', DateTime),
Column('id', String(length=36), primary_key=True, nullable=False),
Column('ec2_id', String(length=255)),
Column('user_id', String(length=255)),
Column('project_id', String(length=255)),
Column('host', String(length=255)),
Column('size', Integer),
Column('availability_zone', String(length=255)),
Column('mountpoint', String(length=255)),
Column('status', String(length=255)),
Column('attach_status', String(length=255)),
Column('scheduled_at', DateTime),
Column('launched_at', DateTime),
Column('terminated_at', DateTime),
Column('display_name', String(length=255)),
Column('display_description', String(length=255)),
Column('provider_location', String(length=256)),
Column('provider_auth', String(length=256)),
Column('snapshot_id', String(length=36)),
Column('volume_type_id', Integer),
Column('instance_uuid', String(length=36)),
Column('attach_time', DateTime),
Column('deleted', String(length=36)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
volume_usage_cache = Table('volume_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('volume_id', String(36), nullable=False),
Column('tot_last_refreshed', DateTime(timezone=False)),
Column('tot_reads', BigInteger(), default=0),
Column('tot_read_bytes', BigInteger(), default=0),
Column('tot_writes', BigInteger(), default=0),
Column('tot_write_bytes', BigInteger(), default=0),
Column('curr_last_refreshed', DateTime(timezone=False)),
Column('curr_reads', BigInteger(), default=0),
Column('curr_read_bytes', BigInteger(), default=0),
Column('curr_writes', BigInteger(), default=0),
Column('curr_write_bytes', BigInteger(), default=0),
Column('deleted', Integer),
Column("instance_uuid", String(length=36)),
Column("project_id", String(length=36)),
Column("user_id", String(length=36)),
Column("availability_zone", String(length=255)),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
instances.create()
Index('project_id', instances.c.project_id).create()
Index('uuid', instances.c.uuid, unique=True).create()
# create all tables
tables = [aggregates, console_pools, instance_types,
security_groups, snapshots, volumes,
# those that are children and others later
agent_builds, aggregate_hosts, aggregate_metadata,
block_device_mapping, bw_usage_cache, cells,
certificates, compute_node_stats, compute_nodes, consoles,
dns_domains, fixed_ips, floating_ips,
instance_faults, instance_id_mappings, instance_info_caches,
instance_metadata, instance_system_metadata,
instance_type_extra_specs, instance_type_projects,
instance_actions, instance_actions_events,
groups, group_metadata, group_policy, group_member,
iscsi_targets, key_pairs, migrations, networks,
pci_devices, provider_fw_rules, quota_classes, quota_usages,
quotas, project_user_quotas,
reservations, s3_images, security_group_instance_association,
security_group_rules, security_group_default_rules,
services, snapshot_id_mappings, task_log,
virtual_interfaces,
volume_id_mappings,
volume_usage_cache]
for table in tables:
try:
table.create()
except Exception:
LOG.info(repr(table))
LOG.exception(_LE('Exception while creating table.'))
raise
# task log unique constraint
task_log_uc = "uniq_task_log0task_name0host0period_beginning0period_ending"
task_log_cols = ('task_name', 'host', 'period_beginning', 'period_ending')
uc = UniqueConstraint(*task_log_cols, table=task_log, name=task_log_uc)
uc.create()
# networks unique constraint
UniqueConstraint('vlan', 'deleted', table=networks,
name='uniq_networks0vlan0deleted').create()
# instance_type_name constraint
UniqueConstraint('name', 'deleted', table=instance_types,
name='uniq_instance_types0name0deleted').create()
# flavorid unique constraint
UniqueConstraint('flavorid', 'deleted', table=instance_types,
name='uniq_instance_types0flavorid0deleted').create()
# keypair constraint
UniqueConstraint('user_id', 'name', 'deleted', table=key_pairs,
name='uniq_key_pairs0user_id0name0deleted').create()
# instance_type_projects constraint
inst_type_uc_name = 'uniq_instance_type_projects0instance_type_id0' + \
'project_id0deleted'
UniqueConstraint('instance_type_id', 'project_id', 'deleted',
table=instance_type_projects,
name=inst_type_uc_name).create()
# floating_ips unique constraint
UniqueConstraint('address', 'deleted',
table=floating_ips,
name='uniq_floating_ips0address0deleted').create()
# instance_info_caches
UniqueConstraint('instance_uuid',
table=instance_info_caches,
name='uniq_instance_info_caches0instance_uuid').create()
UniqueConstraint('address', 'deleted',
table=virtual_interfaces,
name='uniq_virtual_interfaces0address0deleted').create()
# cells
UniqueConstraint('name', 'deleted',
table=cells,
name='uniq_cells0name0deleted').create()
# security_groups
uc = UniqueConstraint('project_id', 'name', 'deleted',
table=security_groups,
name='uniq_security_groups0project_id0name0deleted')
uc.create()
# quotas
UniqueConstraint('project_id', 'resource', 'deleted',
table=quotas,
name='uniq_quotas0project_id0resource0deleted').create()
# fixed_ips
UniqueConstraint('address', 'deleted',
table=fixed_ips,
name='uniq_fixed_ips0address0deleted').create()
# services
UniqueConstraint('host', 'topic', 'deleted',
table=services,
name='uniq_services0host0topic0deleted').create()
UniqueConstraint('host', 'binary', 'deleted',
table=services,
name='uniq_services0host0binary0deleted').create()
# agent_builds
uc_name = 'uniq_agent_builds0hypervisor0os0architecture0deleted'
UniqueConstraint('hypervisor', 'os', 'architecture', 'deleted',
table=agent_builds,
name=uc_name).create()
uc_name = 'uniq_console_pools0host0console_type0compute_host0deleted'
UniqueConstraint('host', 'console_type', 'compute_host', 'deleted',
table=console_pools,
name=uc_name).create()
uc_name = 'uniq_aggregate_hosts0host0aggregate_id0deleted'
UniqueConstraint('host', 'aggregate_id', 'deleted',
table=aggregate_hosts,
name=uc_name).create()
uc_name = 'uniq_aggregate_metadata0aggregate_id0key0deleted'
UniqueConstraint('aggregate_id', 'key', 'deleted',
table=aggregate_metadata,
name=uc_name).create()
uc_name = 'uniq_instance_type_extra_specs0instance_type_id0key0deleted'
UniqueConstraint('instance_type_id', 'key', 'deleted',
table=instance_type_extra_specs,
name=uc_name).create()
# created first (to preserve ordering for schema diffs)
mysql_pre_indexes = [
Index('instance_type_id', instance_type_projects.c.instance_type_id),
Index('project_id', dns_domains.c.project_id),
Index('fixed_ip_id', floating_ips.c.fixed_ip_id),
Index('network_id', virtual_interfaces.c.network_id),
Index('network_id', fixed_ips.c.network_id),
Index('fixed_ips_virtual_interface_id_fkey',
fixed_ips.c.virtual_interface_id),
Index('address', fixed_ips.c.address),
Index('fixed_ips_instance_uuid_fkey', fixed_ips.c.instance_uuid),
Index('instance_uuid', instance_system_metadata.c.instance_uuid),
Index('iscsi_targets_volume_id_fkey', iscsi_targets.c.volume_id),
Index('snapshot_id', block_device_mapping.c.snapshot_id),
Index('usage_id', reservations.c.usage_id),
Index('virtual_interfaces_instance_uuid_fkey',
virtual_interfaces.c.instance_uuid),
Index('volume_id', block_device_mapping.c.volume_id),
Index('security_group_id',
security_group_instance_association.c.security_group_id),
]
# Common indexes (indexes we apply to all databases)
# NOTE: order specific for MySQL diff support
common_indexes = [
# aggregate_metadata
Index('aggregate_metadata_key_idx', aggregate_metadata.c.key),
# agent_builds
Index('agent_builds_hypervisor_os_arch_idx',
agent_builds.c.hypervisor,
agent_builds.c.os,
agent_builds.c.architecture),
# block_device_mapping
Index('block_device_mapping_instance_uuid_idx',
block_device_mapping.c.instance_uuid),
Index('block_device_mapping_instance_uuid_device_name_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.device_name),
# NOTE(dprince): This is now a duplicate index on MySQL and needs to
# be removed there. We leave it here so the Index ordering
# matches on schema diffs (for MySQL).
# See Havana migration 186_new_bdm_format where we dropped the
# virtual_name column.
# IceHouse fix is here: https://bugs.launchpad.net/nova/+bug/1265839
Index(
'block_device_mapping_instance_uuid_virtual_name_device_name_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.device_name),
Index('block_device_mapping_instance_uuid_volume_id_idx',
block_device_mapping.c.instance_uuid,
block_device_mapping.c.volume_id),
# bw_usage_cache
Index('bw_usage_cache_uuid_start_period_idx',
bw_usage_cache.c.uuid, bw_usage_cache.c.start_period),
Index('certificates_project_id_deleted_idx',
certificates.c.project_id, certificates.c.deleted),
Index('certificates_user_id_deleted_idx', certificates.c.user_id,
certificates.c.deleted),
# compute_node_stats
Index('ix_compute_node_stats_compute_node_id',
compute_node_stats.c.compute_node_id),
Index('compute_node_stats_node_id_and_deleted_idx',
compute_node_stats.c.compute_node_id,
compute_node_stats.c.deleted),
# consoles
Index('consoles_instance_uuid_idx', consoles.c.instance_uuid),
# dns_domains
Index('dns_domains_domain_deleted_idx',
dns_domains.c.domain, dns_domains.c.deleted),
# fixed_ips
Index('fixed_ips_host_idx', fixed_ips.c.host),
Index('fixed_ips_network_id_host_deleted_idx', fixed_ips.c.network_id,
fixed_ips.c.host, fixed_ips.c.deleted),
Index('fixed_ips_address_reserved_network_id_deleted_idx',
fixed_ips.c.address, fixed_ips.c.reserved,
fixed_ips.c.network_id, fixed_ips.c.deleted),
Index('fixed_ips_deleted_allocated_idx', fixed_ips.c.address,
fixed_ips.c.deleted, fixed_ips.c.allocated),
# floating_ips
Index('floating_ips_host_idx', floating_ips.c.host),
Index('floating_ips_project_id_idx', floating_ips.c.project_id),
Index('floating_ips_pool_deleted_fixed_ip_id_project_id_idx',
floating_ips.c.pool, floating_ips.c.deleted,
floating_ips.c.fixed_ip_id, floating_ips.c.project_id),
# group_member
Index('instance_group_member_instance_idx',
group_member.c.instance_id),
# group_metadata
Index('instance_group_metadata_key_idx', group_metadata.c.key),
# group_policy
Index('instance_group_policy_policy_idx', group_policy.c.policy),
# instances
Index('instances_reservation_id_idx',
instances.c.reservation_id),
Index('instances_terminated_at_launched_at_idx',
instances.c.terminated_at,
instances.c.launched_at),
Index('instances_task_state_updated_at_idx',
instances.c.task_state,
instances.c.updated_at),
Index('instances_host_deleted_idx', instances.c.host,
instances.c.deleted),
Index('instances_uuid_deleted_idx', instances.c.uuid,
instances.c.deleted),
Index('instances_host_node_deleted_idx', instances.c.host,
instances.c.node, instances.c.deleted),
Index('instances_host_deleted_cleaned_idx',
instances.c.host, instances.c.deleted,
instances.c.cleaned),
# instance_actions
Index('instance_uuid_idx', instance_actions.c.instance_uuid),
Index('request_id_idx', instance_actions.c.request_id),
# instance_faults
Index('instance_faults_host_idx', instance_faults.c.host),
Index('instance_faults_instance_uuid_deleted_created_at_idx',
instance_faults.c.instance_uuid, instance_faults.c.deleted,
instance_faults.c.created_at),
# instance_id_mappings
Index('ix_instance_id_mappings_uuid', instance_id_mappings.c.uuid),
# instance_metadata
Index('instance_metadata_instance_uuid_idx',
instance_metadata.c.instance_uuid),
# instance_type_extra_specs
Index('instance_type_extra_specs_instance_type_id_key_idx',
instance_type_extra_specs.c.instance_type_id,
instance_type_extra_specs.c.key),
# iscsi_targets
Index('iscsi_targets_host_idx', iscsi_targets.c.host),
Index('iscsi_targets_host_volume_id_deleted_idx',
iscsi_targets.c.host, iscsi_targets.c.volume_id,
iscsi_targets.c.deleted),
# migrations
Index('migrations_by_host_nodes_and_status_idx',
migrations.c.deleted, migrations.c.source_compute,
migrations.c.dest_compute, migrations.c.source_node,
migrations.c.dest_node, migrations.c.status),
Index('migrations_instance_uuid_and_status_idx',
migrations.c.deleted, migrations.c.instance_uuid,
migrations.c.status),
# networks
Index('networks_host_idx', networks.c.host),
Index('networks_cidr_v6_idx', networks.c.cidr_v6),
Index('networks_bridge_deleted_idx', networks.c.bridge,
networks.c.deleted),
Index('networks_project_id_deleted_idx', networks.c.project_id,
networks.c.deleted),
Index('networks_uuid_project_id_deleted_idx',
networks.c.uuid, networks.c.project_id, networks.c.deleted),
Index('networks_vlan_deleted_idx', networks.c.vlan,
networks.c.deleted),
# project_user_quotas
Index('project_user_quotas_project_id_deleted_idx',
project_user_quotas.c.project_id,
project_user_quotas.c.deleted),
Index('project_user_quotas_user_id_deleted_idx',
project_user_quotas.c.user_id, project_user_quotas.c.deleted),
# reservations
Index('ix_reservations_project_id', reservations.c.project_id),
Index('ix_reservations_user_id_deleted',
reservations.c.user_id, reservations.c.deleted),
Index('reservations_uuid_idx', reservations.c.uuid),
# security_group_instance_association
Index('security_group_instance_association_instance_uuid_idx',
security_group_instance_association.c.instance_uuid),
# task_log
Index('ix_task_log_period_beginning', task_log.c.period_beginning),
Index('ix_task_log_host', task_log.c.host),
Index('ix_task_log_period_ending', task_log.c.period_ending),
# quota_classes
Index('ix_quota_classes_class_name', quota_classes.c.class_name),
# quota_usages
Index('ix_quota_usages_project_id', quota_usages.c.project_id),
Index('ix_quota_usages_user_id_deleted',
quota_usages.c.user_id, quota_usages.c.deleted),
# volumes
Index('volumes_instance_uuid_idx', volumes.c.instance_uuid),
]
# MySQL specific indexes
if migrate_engine.name == 'mysql':
for index in mysql_pre_indexes:
index.create(migrate_engine)
# mysql-specific index by leftmost 100 chars. (mysql gets angry if the
# index key length is too long.)
sql = ("create index migrations_by_host_nodes_and_status_idx ON "
"migrations (deleted, source_compute(100), dest_compute(100), "
"source_node(100), dest_node(100), status)")
migrate_engine.execute(sql)
# PostgreSQL specific indexes
if migrate_engine.name == 'postgresql':
Index('address', fixed_ips.c.address).create()
# NOTE(dprince): PostgreSQL doesn't allow duplicate indexes
# so we skip creation of select indexes (so schemas match exactly).
POSTGRES_INDEX_SKIPS = [
# See Havana migration 186_new_bdm_format where we dropped the
# virtual_name column.
# IceHouse fix is here: https://bugs.launchpad.net/nova/+bug/1265839
'block_device_mapping_instance_uuid_virtual_name_device_name_idx'
]
MYSQL_INDEX_SKIPS = [
# we create this one manually for MySQL above
'migrations_by_host_nodes_and_status_idx'
]
for index in common_indexes:
if ((migrate_engine.name == 'postgresql' and
index.name in POSTGRES_INDEX_SKIPS) or
(migrate_engine.name == 'mysql' and
index.name in MYSQL_INDEX_SKIPS)):
continue
else:
index.create(migrate_engine)
Index('project_id', dns_domains.c.project_id).drop
# Common foreign keys
fkeys = [
[[instance_type_projects.c.instance_type_id],
[instance_types.c.id],
'instance_type_projects_ibfk_1'],
[[iscsi_targets.c.volume_id],
[volumes.c.id],
'iscsi_targets_volume_id_fkey'],
[[reservations.c.usage_id],
[quota_usages.c.id],
'reservations_ibfk_1'],
[[security_group_instance_association.c.security_group_id],
[security_groups.c.id],
'security_group_instance_association_ibfk_1'],
[[compute_node_stats.c.compute_node_id],
[compute_nodes.c.id],
'fk_compute_node_stats_compute_node_id'],
[[compute_nodes.c.service_id],
[services.c.id],
'fk_compute_nodes_service_id'],
]
secgroup_instance_association_instance_uuid_fkey = (
'security_group_instance_association_instance_uuid_fkey')
fkeys.extend(
[
[[fixed_ips.c.instance_uuid],
[instances.c.uuid],
'fixed_ips_instance_uuid_fkey'],
[[block_device_mapping.c.instance_uuid],
[instances.c.uuid],
'block_device_mapping_instance_uuid_fkey'],
[[instance_info_caches.c.instance_uuid],
[instances.c.uuid],
'instance_info_caches_instance_uuid_fkey'],
[[instance_metadata.c.instance_uuid],
[instances.c.uuid],
'instance_metadata_instance_uuid_fkey'],
[[instance_system_metadata.c.instance_uuid],
[instances.c.uuid],
'instance_system_metadata_ibfk_1'],
[[security_group_instance_association.c.instance_uuid],
[instances.c.uuid],
secgroup_instance_association_instance_uuid_fkey],
[[virtual_interfaces.c.instance_uuid],
[instances.c.uuid],
'virtual_interfaces_instance_uuid_fkey'],
[[instance_actions.c.instance_uuid],
[instances.c.uuid],
'fk_instance_actions_instance_uuid'],
[[instance_faults.c.instance_uuid],
[instances.c.uuid],
'fk_instance_faults_instance_uuid'],
[[migrations.c.instance_uuid],
[instances.c.uuid],
'fk_migrations_instance_uuid']
])
for fkey_pair in fkeys:
if migrate_engine.name in ('mysql'):
# For MySQL we name our fkeys explicitly
# so they match Havana
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1],
name=fkey_pair[2])
fkey.create()
elif migrate_engine.name == 'postgresql':
# PostgreSQL names things like it wants (correct and compatible!)
fkey = ForeignKeyConstraint(columns=fkey_pair[0],
refcolumns=fkey_pair[1])
fkey.create()
if migrate_engine.name == 'mysql':
# In Folsom we explicitly converted migrate_version to UTF8.
migrate_engine.execute(
'ALTER TABLE migrate_version CONVERT TO CHARACTER SET utf8')
# Set default DB charset to UTF8.
migrate_engine.execute(
'ALTER DATABASE %s DEFAULT CHARACTER SET utf8' %
migrate_engine.url.database)
_create_shadow_tables(migrate_engine)
_create_dump_tables(migrate_engine)
|
{
"content_hash": "4379a919bd1f815803e2036977508eb0",
"timestamp": "",
"source": "github",
"line_count": 1522,
"max_line_length": 79,
"avg_line_length": 40.48883048620237,
"alnum_prop": 0.5923017006361158,
"repo_name": "Juniper/nova",
"id": "317e3c3f932520ce2bf6b1e0fb34c7328e6fbf5a",
"size": "62237",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/db/sqlalchemy/migrate_repo/versions/216_havana.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "601"
},
{
"name": "PHP",
"bytes": "23962"
},
{
"name": "Python",
"bytes": "19816434"
},
{
"name": "Shell",
"bytes": "27717"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
}
|
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import threading
import time
import numpy as np
from six.moves import zip_longest
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.contrib.data.python.ops import interleave_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class InterleaveDatasetTest(test.TestCase):
  """Tests for the core `Dataset.interleave()` transformation.

  Each test compares the op's output against `_interleave`, a pure-Python
  reference implementation of the interleave contract.
  """

  def _interleave(self, lists, cycle_length, block_length):
    """Reference implementation of interleave, used to build expectations.

    Args:
      lists: a list of input sequences whose elements are interleaved.
      cycle_length: number of input sequences that are open (being consumed)
        at the same time.
      block_length: number of consecutive elements pulled from one open input
        before moving on to the next open input.

    Yields:
      The elements of `lists`, in interleaved order.
    """
    # TODO(b/69678297): Consolidate python interleave implementations.
    num_open = 0

    # `all_iterators` acts as a queue of iterators over each element of `lists`.
    all_iterators = [iter(l) for l in lists]

    # `open_iterators` are the iterators whose elements are currently being
    # interleaved.
    open_iterators = []
    for i in range(cycle_length):
      if all_iterators:
        open_iterators.append(all_iterators.pop(0))
        num_open += 1
      else:
        # Fewer pending inputs than cycle slots: leave this slot empty.
        open_iterators.append(None)

    while num_open or all_iterators:
      for i in range(cycle_length):
        if open_iterators[i] is None:
          if all_iterators:
            # Refill an exhausted slot from the queue of pending inputs.
            open_iterators[i] = all_iterators.pop(0)
            num_open += 1
          else:
            continue
        for _ in range(block_length):
          try:
            yield next(open_iterators[i])
          except StopIteration:
            # This input is exhausted; free its slot for the next input.
            open_iterators[i] = None
            num_open -= 1
            break

  def testPythonImplementation(self):
    """Sanity-checks `_interleave` against hand-computed orderings."""
    input_lists = [[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6],
                   [4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]]

    # Cycle length 1 acts like `Dataset.flat_map()`.
    expected_elements = itertools.chain(*input_lists)
    for expected, produced in zip(
        expected_elements, self._interleave(input_lists, 1, 1)):
      self.assertEqual(expected, produced)

    # Cycle length > 1.
    expected_elements = [4, 5, 4, 5, 4, 5, 4,
                         5, 5, 6, 6,  # NOTE(mrry): When we cycle back
                                      # to a list and are already at
                                      # the end of that list, we move
                                      # on to the next element.
                         4, 6, 4, 6, 4, 6, 4, 6, 5, 6, 5, 6, 5, 6, 5, 6, 5]
    for expected, produced in zip(
        expected_elements, self._interleave(input_lists, 2, 1)):
      self.assertEqual(expected, produced)

    # Cycle length > 1 and block length > 1.
    expected_elements = [4, 4, 4, 5, 5, 5, 4, 5, 5, 6, 6, 6, 4, 4, 4, 6, 6, 6,
                         4, 5, 5, 5, 6, 6, 6, 5, 5, 6, 6, 6]
    for expected, produced in zip(
        expected_elements, self._interleave(input_lists, 2, 3)):
      self.assertEqual(expected, produced)

    # Cycle length > len(input_values).
    expected_elements = [4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6,
                         4, 4, 5, 5, 6, 6, 5, 6, 6, 5, 6, 6]
    for expected, produced in zip(
        expected_elements, self._interleave(input_lists, 7, 2)):
      self.assertEqual(expected, produced)

  def testInterleaveDataset(self):
    """Runs the interleave op and compares its output with `_interleave`.

    Each input value x is mapped to a dataset of x copies of x
    (e.g. 4 -> [4, 4, 4, 4]), matching the shape of the reference lists.
    """
    input_values = array_ops.placeholder(dtypes.int64, shape=[None])
    cycle_length = array_ops.placeholder(dtypes.int64, shape=[])
    block_length = array_ops.placeholder(dtypes.int64, shape=[])
    repeat_count = 2
    dataset = (
        dataset_ops.Dataset.from_tensor_slices(input_values)
        .repeat(repeat_count)
        .interleave(lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x),
                    cycle_length, block_length))
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    next_element = iterator.get_next()

    with self.test_session() as sess:
      # Cycle length 1 acts like `Dataset.flat_map()`.
      sess.run(init_op, feed_dict={input_values: [4, 5, 6],
                                   cycle_length: 1, block_length: 3})
      for expected_element in self._interleave(
          [[4] * 4, [5] * 5, [6] * 6] * repeat_count, 1, 3):
        self.assertEqual(expected_element, sess.run(next_element))

      # Cycle length > 1.
      # expected: [4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5,
      #            6, 5, 6, 5, 6, 5, 6, 5]
      sess.run(init_op, feed_dict={input_values: [4, 5, 6],
                                   cycle_length: 2, block_length: 1})
      for expected_element in self._interleave(
          [[4] * 4, [5] * 5, [6] * 6] * repeat_count, 2, 1):
        self.assertEqual(expected_element, sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

      # Cycle length > 1 and block length > 1.
      # expected: [4, 4, 4, 5, 5, 5, 4, 5, 5, 6, 6, 6, 4, 4, 4, 6, 6, 6, 4, 5,
      #            5, 5, 6, 6, 6, 5, 5, 6, 6, 6]
      sess.run(init_op, feed_dict={input_values: [4, 5, 6],
                                   cycle_length: 2, block_length: 3})
      for expected_element in self._interleave(
          [[4] * 4, [5] * 5, [6] * 6] * repeat_count, 2, 3):
        self.assertEqual(expected_element, sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

      # Cycle length > len(input_values) * repeat_count.
      # expected: [4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4, 5, 5, 6, 6, 4, 4,
      #            5, 5, 6, 6, 5, 6, 6, 5, 6, 6]
      sess.run(init_op, feed_dict={input_values: [4, 5, 6],
                                   cycle_length: 7, block_length: 2})
      for expected_element in self._interleave(
          [[4] * 4, [5] * 5, [6] * 6] * repeat_count, 7, 2):
        self.assertEqual(expected_element, sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

      # Empty input.
      sess.run(init_op, feed_dict={input_values: [],
                                   cycle_length: 2, block_length: 3})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

      # Non-empty input leading to empty output.
      # (x == 0 repeats each element zero times, producing nothing.)
      sess.run(init_op, feed_dict={input_values: [0, 0, 0],
                                   cycle_length: 2, block_length: 3})
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

      # Mixture of non-empty and empty interleaved datasets.
      sess.run(init_op, feed_dict={input_values: [4, 0, 6],
                                   cycle_length: 2, block_length: 3})
      for expected_element in self._interleave(
          [[4] * 4, [], [6] * 6] * repeat_count, 2, 3):
        self.assertEqual(expected_element, sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

  def testSparse(self):
    """Interleave where the map stage produces sparse tensors."""

    def _map_fn(i):
      # i -> 2x2 sparse tensor with values i and -i on the diagonal.
      return sparse_tensor.SparseTensorValue(
          indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])

    def _interleave_fn(x):
      # Densify and slice row-wise, producing two rows per input element.
      return dataset_ops.Dataset.from_tensor_slices(
          sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))

    iterator = (
        dataset_ops.Dataset.range(10).map(_map_fn).interleave(
            _interleave_fn, cycle_length=1).make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
      sess.run(init_op)
      for i in range(10):
        for j in range(2):
          # Row 0 of the densified tensor is [i, 0]; row 1 is [0, -i].
          expected = [i, 0] if j % 2 == 0 else [0, -i]
          self.assertAllEqual(expected, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
# NOTE(review): "Seriazation" is a typo for "Serialization"; the name is kept
# as-is here because it is the externally visible test-class identifier.
class InterleaveDatasetSeriazationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):
  """Checkpoint/restore (serialization) tests for `Dataset.interleave()`."""

  def _build_iterator_graph(self, input_values, cycle_length, block_length):
    """Builds the dataset under test: each value x yields x copies of x."""
    repeat_count = 2
    return dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
        repeat_count).interleave(
            lambda x: dataset_ops.Dataset.from_tensors(x).repeat(x),
            cycle_length, block_length)

  def testSerializationCore(self):
    """Runs the core serialization tests over several interleave configs."""
    input_values = np.array([4, 5, 6], dtype=np.int64)
    # Each value x contributes x elements, and the input repeats twice.
    num_outputs = np.sum(input_values) * 2
    # cycle_length > 1, block_length > 1
    cycle_length = 2
    block_length = 3
    # pylint: disable=g-long-lambda
    self.run_core_tests(
        lambda: self._build_iterator_graph(
            input_values, cycle_length, block_length),
        lambda: self._build_iterator_graph(
            input_values, cycle_length * 2, block_length * 1),
        num_outputs)
    # cycle_length = 1
    cycle_length = 1
    block_length = 3
    self.run_core_tests(
        lambda: self._build_iterator_graph(
            input_values, cycle_length, block_length),
        None, num_outputs)
    # block_length = 1
    cycle_length = 2
    block_length = 1
    self.run_core_tests(
        lambda: self._build_iterator_graph(
            input_values, cycle_length, block_length),
        None, num_outputs)
    # pylint: enable=g-long-lambda

  def testSparseCore(self):
    """Serialization test where the interleave input is a sparse tensor."""

    def _map_fn(i):
      # i -> 2x2 sparse tensor with values i and -i on the diagonal.
      return sparse_tensor.SparseTensorValue(
          indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])

    def _interleave_fn(x):
      # Densify and slice row-wise, producing two rows per input element.
      return dataset_ops.Dataset.from_tensor_slices(
          sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))

    def _build_dataset():
      return dataset_ops.Dataset.range(10).map(_map_fn).interleave(
          _interleave_fn, cycle_length=1)

    # 10 inputs x 2 rows each = 20 output elements.
    self.run_core_tests(_build_dataset, None, 20)
class ParallelInterleaveDatasetTest(test.TestCase):
  def setUp(self):
    """Builds a `parallel_interleave` pipeline instrumented for sequencing.

    The map function applied inside each interleaved dataset blocks on a
    per-value `threading.Event` (a "write" event) and releases a per-value
    `threading.Semaphore` (a "read" event) after producing, so tests can
    deterministically control and observe the order in which worker threads
    produce elements.
    """
    self.input_values = array_ops.placeholder(dtypes.int64, shape=[None])
    self.cycle_length = array_ops.placeholder(dtypes.int64, shape=[])
    self.block_length = array_ops.placeholder(dtypes.int64, shape=[])
    self.sloppy = array_ops.placeholder(dtypes.bool, shape=[])
    self.buffer_output_elements = array_ops.placeholder(dtypes.int64, shape=[])
    self.prefetch_input_elements = array_ops.placeholder(dtypes.int64, shape=[])

    # When set, the next map invocation raises this error (and clears it).
    self.error = None
    self.repeat_count = 2

    # Set up threading events used to sequence when items are produced that
    # are subsequently interleaved. These events allow us to deterministically
    # simulate slowdowns and force sloppiness.
    self.read_coordination_events = {}
    self.write_coordination_events = {}
    # input values [4, 5, 6] are the common case for the tests; set defaults
    for i in range(4, 7):
      self.read_coordination_events[i] = threading.Semaphore(0)
      self.write_coordination_events[i] = threading.Event()

    def map_py_fn(x):
      # Block until the test allows value x, then signal that x was produced.
      self.write_coordination_events[x].wait()
      self.write_coordination_events[x].clear()
      self.read_coordination_events[x].release()
      if self.error:
        err = self.error
        self.error = None
        raise err  # pylint: disable=raising-bad-type
      return x * x

    def map_fn(x):
      return script_ops.py_func(map_py_fn, [x], x.dtype)

    def interleave_fn(x):
      # Each value x becomes x copies of x, squared by the map function.
      dataset = dataset_ops.Dataset.from_tensors(x)
      dataset = dataset.repeat(x)
      return dataset.map(map_fn)

    self.dataset = (
        dataset_ops.Dataset.from_tensor_slices(self.input_values)
        .repeat(self.repeat_count).apply(
            interleave_ops.parallel_interleave(interleave_fn, self.cycle_length,
                                               self.block_length, self.sloppy,
                                               self.buffer_output_elements,
                                               self.prefetch_input_elements)))
    self.iterator = self.dataset.make_initializable_iterator()
    self.init_op = self.iterator.initializer
    self.next_element = self.iterator.get_next()
def _interleave(self, lists, cycle_length, block_length):
"""Python implementation of interleave used for testing."""
num_open = 0
# `all_iterators` acts as a queue of iterators over each element of `lists`.
all_iterators = [iter(l) for l in lists]
# `open_iterators` are the iterators whose elements are currently being
# interleaved.
open_iterators = []
for i in range(cycle_length):
if all_iterators:
open_iterators.append(all_iterators.pop(0))
num_open += 1
else:
open_iterators.append(None)
while num_open or all_iterators:
for i in range(cycle_length):
if open_iterators[i] is None:
if all_iterators:
open_iterators[i] = all_iterators.pop(0)
num_open += 1
else:
continue
for _ in range(block_length):
try:
yield next(open_iterators[i])
except StopIteration:
open_iterators[i] = None
num_open -= 1
break
def testPythonImplementation(self):
input_lists = [[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6],
[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]]
# Cycle length 1 acts like `Dataset.flat_map()`.
expected_elements = itertools.chain(*input_lists)
for expected, produced in zip(expected_elements,
self._interleave(input_lists, 1, 1)):
self.assertEqual(expected, produced)
# Cycle length > 1.
expected_elements = [
4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5, 6, 5, 6, 5,
6, 5, 6, 5, 6, 6
]
for index, (expected, produced) in enumerate(
zip_longest(expected_elements, self._interleave(input_lists, 2, 1))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def testPythonImplementationBlockLength(self):
input_lists = [[4] * 4, [5] * 5, [6] * 6] * 2
expected_elements = [
4, 4, 5, 5, 4, 4, 5, 5, 5, 6, 6, 4, 4, 6, 6, 4, 4, 6, 6, 5, 5, 6, 6, 5,
5, 6, 6, 5, 6, 6
]
for index, (expected, produced) in enumerate(
zip_longest(expected_elements, self._interleave(input_lists, 2, 2))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def testPythonImplementationEmptyLists(self):
input_lists = [[4, 4, 4, 4], [], [6, 6, 6, 6, 6, 6], [4, 4, 4, 4], [],
[6, 6, 6, 6, 6, 6]]
expected_elements = [
4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6
]
for index, (expected, produced) in enumerate(
zip_longest(expected_elements, self._interleave(input_lists, 2, 1))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def _clear_coordination_events(self):
for i in range(4, 7):
self.read_coordination_events[i] = threading.Semaphore(0)
self.write_coordination_events[i].clear()
def _allow_all_map_threads(self):
for i in range(4, 7):
self.write_coordination_events[i].set()
  def _testSingleThreaded(self, sloppy=False, prefetch_input_elements=0):
    """Drives a cycle_length=1, block_length=1 run and checks the output."""
    # cycle_length=1,block_length=1 acts like `Dataset.interleave()` and
    # `Dataset.flat_map()` and is single-threaded. No synchronization required.
    with self.test_session() as sess:
      self._clear_coordination_events()
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [4, 5, 6],
              self.cycle_length: 1,
              self.block_length: 1,
              self.sloppy: sloppy,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: prefetch_input_elements,
          })
      for expected_element in self._interleave(
          [[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 1):
        # Unblock the map function for this value, then pull one element.
        self.write_coordination_events[expected_element].set()
        # The map function squares its input (see setUp).
        self.assertEqual(expected_element * expected_element,
                         sess.run(self.next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.next_element)
def testSingleThreaded(self):
self._testSingleThreaded()
def testSingleThreadedSloppy(self):
self._testSingleThreaded(sloppy=True)
def testSingleThreadedPrefetch1Itr(self):
self._testSingleThreaded(prefetch_input_elements=1)
def testSingleThreadedPrefetch1ItrSloppy(self):
self._testSingleThreaded(prefetch_input_elements=1, sloppy=True)
  def testSingleThreadedRagged(self):
    """Single-threaded run where inputs have very different lengths."""
    # Tests a sequence with wildly different elements per iterator.
    with self.test_session() as sess:
      self._clear_coordination_events()
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [3, 7, 4],
              self.cycle_length: 2,
              self.block_length: 1,
              self.sloppy: False,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: 1,
          })

      # Add coordination values for 3 and 7
      # (setUp only wires up the common values 4, 5 and 6).
      self.read_coordination_events[3] = threading.Semaphore(0)
      self.write_coordination_events[3] = threading.Event()
      self.read_coordination_events[7] = threading.Semaphore(0)
      self.write_coordination_events[7] = threading.Event()

      for expected_element in self._interleave(
          [[3] * 3, [7] * 7, [4] * 4] * self.repeat_count, 2, 1):
        # Unblock the map function for this value, then pull one element.
        self.write_coordination_events[expected_element].set()
        output = sess.run(self.next_element)
        self.assertEqual(expected_element * expected_element, output)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.next_element)
  def _testTwoThreadsNoContention(self, sloppy=False):
    """Two workers, with production order fully controlled by the test."""
    # num_threads > 1.
    # Explicit coordination should result in `Dataset.interleave()` behavior
    with self.test_session() as sess:
      self._clear_coordination_events()
      done_first_event = False
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [4, 5, 6],
              self.cycle_length: 2,
              self.block_length: 1,
              self.sloppy: sloppy,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: 1,
          })
      for i, expected_element in enumerate(
          self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
                           1)):
        # Allow exactly this value to be produced next.
        self.write_coordination_events[expected_element].set()
        if done_first_event:  # First event starts the worker threads.
          # Wait until the map function has actually produced the value
          # before asking the iterator for it.
          self.read_coordination_events[expected_element].acquire()
        actual_element = sess.run(self.next_element)
        if not done_first_event:
          self.read_coordination_events[expected_element].acquire()
          done_first_event = True
        self.assertEqual(expected_element * expected_element, actual_element,
                         "At index %s: %s expected, got: %s" %
                         (i, expected_element, actual_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.next_element)
def testTwoThreadsNoContention(self):
self._testTwoThreadsNoContention()
def testTwoThreadsNoContentionSloppy(self):
self._testTwoThreadsNoContention(sloppy=True)
  def _testTwoThreadsNoContentionWithRaces(self, sloppy=False):
    """Tests where all the workers race in producing elements.

    Note: this is in contrast with the previous test which carefully sequences
    the execution of the map functions.

    Args:
      sloppy: Whether to be sloppy or not.
    """
    with self.test_session() as sess:
      self._clear_coordination_events()
      done_first_event = False
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [4, 5, 6],
              self.cycle_length: 2,
              self.block_length: 1,
              self.sloppy: sloppy,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: 1,
          })
      for i, expected_element in enumerate(
          self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
                           1)):
        if done_first_event:  # First event starts the worker threads.
          # Let every map function run, so the workers race freely.
          self._allow_all_map_threads()
          self.read_coordination_events[expected_element].acquire()
        else:
          self.write_coordination_events[expected_element].set()
        time.sleep(0.5)  # Sleep to consistently "avoid" the race condition.
        actual_element = sess.run(self.next_element)
        if not done_first_event:
          done_first_event = True
          # The first element must already have been produced.
          self.assertTrue(
              self.read_coordination_events[expected_element].acquire(False))
        self.assertEqual(expected_element * expected_element, actual_element,
                         "At index %s: %s expected, got: %s" %
                         (i, expected_element, actual_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.next_element)
  def testTwoThreadsNoContentionWithRaces(self):
    # Racing workers, deterministic (non-sloppy) output order.
    self._testTwoThreadsNoContentionWithRaces()
  def testTwoThreadsNoContentionWithRacesSloppy(self):
    # Racing workers with sloppy (order-relaxed) interleaving.
    self._testTwoThreadsNoContentionWithRaces(sloppy=True)
  def _testTwoThreadsNoContentionBlockLength(self, sloppy=False):
    """Two worker threads, block_length=2, fully sequenced via events."""
    # num_threads > 1.
    # Explicit coordination should result in `Dataset.interleave()` behavior
    with self.test_session() as sess:
      self._clear_coordination_events()
      done_first_event = False
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [4, 5, 6],
              self.cycle_length: 2,
              self.block_length: 2,
              self.sloppy: sloppy,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: 1,
          })
      for i, expected_element in enumerate(
          self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
                           2)):
        self.write_coordination_events[expected_element].set()
        if done_first_event:  # First event starts the worker threads.
          self.read_coordination_events[expected_element].acquire()
        actual_element = sess.run(self.next_element)
        if not done_first_event:
          done_first_event = True
          self.read_coordination_events[expected_element].acquire()
        self.assertEqual(expected_element * expected_element, actual_element,
                         "At index %s: %s expected, got: %s" %
                         (i, expected_element, actual_element))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(self.next_element)
  def testTwoThreadsNoContentionBlockLength(self):
    # block_length=2 variant, deterministic ordering.
    self._testTwoThreadsNoContentionBlockLength()
  def testTwoThreadsNoContentionBlockLengthSloppy(self):
    # block_length=2 variant with sloppy interleaving.
    self._testTwoThreadsNoContentionBlockLength(sloppy=True)
  def _testTwoThreadsNoContentionWithRacesAndBlocking(self, sloppy=False):
    """Tests where all the workers race in producing elements.
    Note: this is in contrast with the previous test which carefully sequences
    the execution of the map functions.

    Same as _testTwoThreadsNoContentionWithRaces but with block_length=2.
    Args:
      sloppy: Whether to be sloppy or not.
    """
    with self.test_session() as sess:
      self._clear_coordination_events()
      done_first_event = False
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [4, 5, 6],
              self.cycle_length: 2,
              self.block_length: 2,
              self.sloppy: sloppy,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: 1,
          })
      for i, expected_element in enumerate(
          self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
                           2)):
        if done_first_event:  # First event starts the worker threads.
          self._allow_all_map_threads()
          self.read_coordination_events[expected_element].acquire()
        else:
          self.write_coordination_events[expected_element].set()
        time.sleep(0.5)  # Sleep to consistently "avoid" the race condition.
        actual_element = sess.run(self.next_element)
        if not done_first_event:
          done_first_event = True
          # Non-blocking acquire: the read event must already be available.
          self.assertTrue(
              self.read_coordination_events[expected_element].acquire(False))
        self.assertEqual(expected_element * expected_element, actual_element,
                         "At index %s: %s expected, got: %s" %
                         (i, expected_element, actual_element))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(self.next_element)
  def testTwoThreadsNoContentionWithRacesAndBlocking(self):
    # Racing workers, block_length=2, deterministic ordering.
    self._testTwoThreadsNoContentionWithRacesAndBlocking()
  def testTwoThreadsNoContentionWithRacesAndBlockingSloppy(self):
    # Racing workers, block_length=2, sloppy interleaving.
    self._testTwoThreadsNoContentionWithRacesAndBlocking(sloppy=True)
  def _testEmptyInput(self, sloppy=False):
    """An empty input dataset must immediately raise OutOfRangeError."""
    with self.test_session() as sess:
      # Empty input.
      self._clear_coordination_events()
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [],
              self.cycle_length: 2,
              self.block_length: 3,
              self.sloppy: sloppy,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: 0,
          })
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.next_element)
  def testEmptyInput(self):
    # Empty input, deterministic ordering.
    self._testEmptyInput()
  def testEmptyInputSloppy(self):
    # Empty input with sloppy interleaving.
    self._testEmptyInput(sloppy=True)
  def _testNonEmptyInputIntoEmptyOutputs(self, sloppy=False):
    """Inputs that each map to an empty dataset yield no output elements."""
    # Non-empty input leading to empty output.
    with self.test_session() as sess:
      self._clear_coordination_events()
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [0, 0, 0],
              self.cycle_length: 2,
              self.block_length: 3,
              self.sloppy: sloppy,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: 0,
          })
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.next_element)
  def testNonEmptyInputIntoEmptyOutputs(self):
    # Non-empty input, all-empty outputs, deterministic ordering.
    self._testNonEmptyInputIntoEmptyOutputs()
  def testNonEmptyInputIntoEmptyOutputsSloppy(self):
    # Non-empty input, all-empty outputs, sloppy interleaving.
    self._testNonEmptyInputIntoEmptyOutputs(sloppy=True)
  def _testPartiallyEmptyOutputs(self, sloppy=False, prefetch_input_elements=1):
    """Mixes empty and non-empty per-input datasets in one interleave."""
    race_indices = {2, 8, 14}  # Sequence points when sloppy mode has race conds
    # Mixture of non-empty and empty interleaved datasets.
    with self.test_session() as sess:
      self._clear_coordination_events()
      done_first_event = False
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [4, 0, 6],
              self.cycle_length: 2,
              self.block_length: 1,
              self.sloppy: sloppy,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: prefetch_input_elements,
          })
      for i, expected_element in enumerate(
          self._interleave([[4] * 4, [], [6] * 6] * self.repeat_count, 2, 1)):
        self.write_coordination_events[expected_element].set()
        # First event starts the worker threads. Additionally, when running the
        # sloppy case with prefetch_input_elements=0, we get stuck if we wait
        # for the read coordination event for certain event orderings in the
        # presence of finishing iterators.
        if done_first_event and not (sloppy and (i in race_indices)):
          self.read_coordination_events[expected_element].acquire()
        actual_element = sess.run(self.next_element)
        if not done_first_event or (sloppy and (i in race_indices)):
          done_first_event = True
          self.read_coordination_events[expected_element].acquire()
        self.assertEqual(expected_element * expected_element, actual_element,
                         "At index %s: %s expected, got: %s" %
                         (i, expected_element, actual_element))
  def testPartiallyEmptyOutputs(self):
    # Mixed empty/non-empty outputs, deterministic ordering.
    self._testPartiallyEmptyOutputs()
  def testPartiallyEmptyOutputsSloppy(self):
    # Mixed empty/non-empty outputs, sloppy, no input prefetching.
    self._testPartiallyEmptyOutputs(sloppy=True, prefetch_input_elements=0)
  def testDelayedOutputSloppy(self):
    """Sloppy mode must serve whichever element is ready, out of order."""
    # Explicitly control the sequence of events to ensure we correctly avoid
    # head-of-line blocking.
    with self.test_session() as sess:
      self._clear_coordination_events()
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [4, 5, 6],
              self.cycle_length: 2,
              self.block_length: 1,
              self.sloppy: True,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: 0,
          })
      # Deliberately out-of-order release schedule for the map functions.
      mis_ordering = [
          4, 4, 5, 4, 5, 5, 4, 5, 6, 6, 6, 5, 4, 4, 6, 6, 4, 4, 6, 5, 6, 6, 6,
          6, 5, 5, 5, 5, 6, 6
      ]
      for element in mis_ordering:
        self.write_coordination_events[element].set()
        self.assertEqual(element * element, sess.run(self.next_element))
        self.assertTrue(self.read_coordination_events[element].acquire(False))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.next_element)
  def testBlockLengthWithContentionSloppy(self):
    """Sloppy run validated against a block_length=3 reference sequence."""
    with self.test_session() as sess:
      self._clear_coordination_events()
      done_first_event = False
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [4, 5, 6],
              self.cycle_length: 2,
              self.block_length: 1,
              self.sloppy: True,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: 1,
          })
      # Test against a generating sequence that differs from the uncontended
      # case, in order to prove sloppy correctness.
      for i, expected_element in enumerate(
          self._interleave(
              [[4] * 4, [5] * 5, [6] * 6] * self.repeat_count,
              cycle_length=2,
              block_length=3)):
        self.write_coordination_events[expected_element].set()
        if done_first_event:  # First event starts the worker threads.
          self.read_coordination_events[expected_element].acquire()
        actual_element = sess.run(self.next_element)
        if not done_first_event:
          self.read_coordination_events[expected_element].acquire()
          done_first_event = True
        self.assertEqual(expected_element * expected_element, actual_element,
                         "At index %s: %s expected, got: %s" %
                         (i, expected_element, actual_element))
    with self.assertRaises(errors.OutOfRangeError):
      sess.run(self.next_element)
  def _testEarlyExit(self, sloppy=False):
    """Abandoning the iterator mid-stream must not deadlock the workers."""
    # Exiting without consuming all input should not block
    with self.test_session() as sess:
      self._clear_coordination_events()
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [4, 5, 6],
              self.cycle_length: 3,
              self.block_length: 2,
              self.sloppy: sloppy,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: 0,
          })
      for i in range(4, 7):
        self.write_coordination_events[i].set()
      elem = sess.run(self.next_element)  # Start all workers
      # Allow the one successful worker to progress beyond the py_func again.
      elem = int(math.sqrt(elem))
      self.write_coordination_events[elem].set()
      self.read_coordination_events[elem].acquire()
      # Allow the prefetch to succeed
      for i in range(4, 7):
        self.read_coordination_events[i].acquire()
        self.write_coordination_events[i].set()
  def testEarlyExit(self):
    # Early exit, deterministic ordering.
    self._testEarlyExit()
  def testEarlyExitSloppy(self):
    # Early exit with sloppy interleaving.
    self._testEarlyExit(sloppy=True)
  def _testTooManyReaders(self, sloppy=False):
    """cycle_length larger than the number of inputs still yields all items."""

    def interleave_fn(x):
      # Each input x becomes a dataset repeating x, x times.
      dataset = dataset_ops.Dataset.from_tensors(x)
      dataset = dataset.repeat(math_ops.cast(x, dtype=dtypes.int64))
      return dataset

    dataset = dataset_ops.Dataset.from_tensor_slices([4, 5, 6])
    dataset = dataset.repeat(self.repeat_count)
    dataset = dataset.apply(
        interleave_ops.parallel_interleave(
            interleave_fn, cycle_length=16, block_length=2, sloppy=sloppy))
    iterator = dataset.make_one_shot_iterator()
    with self.test_session() as sess:
      output_values = []
      for _ in range(30):
        output_values.append(sess.run(iterator.get_next()))
      # Order is not checked here (assertItemsEqual), only the multiset.
      expected_values = self._interleave(
          [[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 2)
      self.assertItemsEqual(output_values, expected_values)
  def testTooManyReaders(self):
    # Oversized cycle_length, deterministic ordering.
    self._testTooManyReaders()
  def testTooManyReadersSloppy(self):
    # Oversized cycle_length with sloppy interleaving.
    self._testTooManyReaders(sloppy=True)
  def testSparse(self):
    """parallel_interleave handles SparseTensor-producing map functions."""
    def _map_fn(i):
      return sparse_tensor.SparseTensor(
          indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
    def _interleave_fn(x):
      # Densify the sparse tensor and emit its rows as dataset elements.
      return dataset_ops.Dataset.from_tensor_slices(
          sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
    dataset = dataset_ops.Dataset.range(10).map(_map_fn)
    iterator = dataset.apply(
        interleave_ops.parallel_interleave(
            _interleave_fn, cycle_length=1)).make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
      sess.run(init_op)
      for i in range(10):
        for j in range(2):
          # Row 0 of the densified matrix is [i, 0]; row 1 is [0, -i].
          expected = [i, 0] if j % 2 == 0 else [0, -i]
          self.assertAllEqual(expected, sess.run(get_next))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(get_next)
  def testErrorsInOutputFn(self):
    """Errors raised inside the map function surface at the chosen index."""
    with self.test_session() as sess:
      self._clear_coordination_events()
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [4, 5, 6],
              self.cycle_length: 2,
              self.block_length: 1,
              self.sloppy: False,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: 0,
          })
      except_on_element_indices = set([3])
      for i, expected_element in enumerate(
          self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
                           1)):
        if i in except_on_element_indices:
          # Arm the injected failure before releasing the map function.
          self.error = ValueError()
          self.write_coordination_events[expected_element].set()
          with self.assertRaises(errors.InvalidArgumentError):
            sess.run(self.next_element)
        else:
          self.write_coordination_events[expected_element].set()
          actual_element = sess.run(self.next_element)
          self.assertEqual(expected_element * expected_element, actual_element,
                           "At index %s: %s expected, got: %s" %
                           (i, expected_element, actual_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.next_element)
  def testErrorsInInputFn(self):
    """Errors raised while producing an *input* element skip that element."""
    def map_py_fn(x):
      if x == 5:
        raise ValueError()
      return x
    def map_fn(x):
      return script_ops.py_func(map_py_fn, [x], x.dtype)
    def interleave_fn(x):
      dataset = dataset_ops.Dataset.from_tensors(x)
      dataset = dataset.repeat(x)
      return dataset
    # Rebuild the pipeline with the failing map applied to the inputs.
    self.dataset = (
        dataset_ops.Dataset.from_tensor_slices(self.input_values).map(map_fn)
        .repeat(self.repeat_count).apply(
            interleave_ops.parallel_interleave(interleave_fn, self.cycle_length,
                                               self.block_length, self.sloppy,
                                               self.buffer_output_elements,
                                               self.prefetch_input_elements)))
    self.iterator = self.dataset.make_initializable_iterator()
    self.init_op = self.iterator.initializer
    self.next_element = self.iterator.get_next()
    with self.test_session() as sess:
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [4, 5, 6],
              self.cycle_length: 2,
              self.block_length: 1,
              self.sloppy: False,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: 0,
          })
      # Input 5 fails, so it contributes exactly one error instead of 5 items.
      for i, expected_element in enumerate(
          self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)):
        if expected_element == 5:
          with self.assertRaises(errors.InvalidArgumentError):
            sess.run(self.next_element)
        else:
          actual_element = sess.run(self.next_element)
          self.assertEqual(expected_element, actual_element,
                           "At index %s: %s expected, got: %s" %
                           (i, expected_element, actual_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.next_element)
  def testErrorsInInterleaveFn(self):
    """Errors raised inside the interleave function surface as one error."""
    def map_py_fn(x):
      if x == 5:
        raise ValueError()
      return x
    def interleave_fn(x):
      dataset = dataset_ops.Dataset.from_tensors(x)
      # The failure is triggered when computing the repeat count.
      y = script_ops.py_func(map_py_fn, [x], x.dtype)
      dataset = dataset.repeat(y)
      return dataset
    self.dataset = (
        dataset_ops.Dataset.from_tensor_slices(self.input_values)
        .repeat(self.repeat_count).apply(
            interleave_ops.parallel_interleave(interleave_fn, self.cycle_length,
                                               self.block_length, self.sloppy,
                                               self.buffer_output_elements,
                                               self.prefetch_input_elements)))
    self.iterator = self.dataset.make_initializable_iterator()
    self.init_op = self.iterator.initializer
    self.next_element = self.iterator.get_next()
    with self.test_session() as sess:
      sess.run(
          self.init_op,
          feed_dict={
              self.input_values: [4, 5, 6],
              self.cycle_length: 2,
              self.block_length: 1,
              self.sloppy: False,
              self.buffer_output_elements: 1,
              self.prefetch_input_elements: 0,
          })
      for i, expected_element in enumerate(
          self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)):
        if expected_element == 5:
          with self.assertRaises(errors.InvalidArgumentError):
            sess.run(self.next_element)
        else:
          actual_element = sess.run(self.next_element)
          self.assertEqual(expected_element, actual_element,
                           "At index %s: %s expected, got: %s" %
                           (i, expected_element, actual_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(self.next_element)
if __name__ == "__main__":
  # Run the TensorFlow test runner when executed as a script.
  test.main()
|
{
"content_hash": "d51f16a05a2b682e789a2250524b93e3",
"timestamp": "",
"source": "github",
"line_count": 1001,
"max_line_length": 87,
"avg_line_length": 39.25174825174825,
"alnum_prop": 0.5978722862742104,
"repo_name": "ravindrapanda/tensorflow",
"id": "db8429512bf2bf944e67b65d185aca99477c86d3",
"size": "39980",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/data/python/kernel_tests/interleave_dataset_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7908"
},
{
"name": "C",
"bytes": "186817"
},
{
"name": "C++",
"bytes": "25164156"
},
{
"name": "CMake",
"bytes": "166422"
},
{
"name": "Go",
"bytes": "857510"
},
{
"name": "HTML",
"bytes": "568425"
},
{
"name": "Java",
"bytes": "317802"
},
{
"name": "JavaScript",
"bytes": "1399"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "227436"
},
{
"name": "Python",
"bytes": "22238905"
},
{
"name": "Shell",
"bytes": "338684"
},
{
"name": "TypeScript",
"bytes": "797972"
}
],
"symlink_target": ""
}
|
import imp
import os
from . import commands
from . import statusfile
from . import utils
from ..objects import testcase
# Use this to run several variants of the tests.
# Maps variant name -> list of flag sets to run for that variant.
ALL_VARIANT_FLAGS = {
  "default": [[]],
  "stress": [["--stress-opt", "--always-opt"]],
  "turbofan": [["--turbo", "--always-opt"]],
  "nocrankshaft": [["--nocrankshaft"]],
}
# FAST_VARIANTS implies no --always-opt.
FAST_VARIANT_FLAGS = {
  "default": [[]],
  "stress": [["--stress-opt"]],
  "turbofan": [["--turbo"]],
  "nocrankshaft": [["--nocrankshaft"]],
}
# Variant-name sets used to intersect with the runner's requested variants.
ALL_VARIANTS = set(["default", "stress", "turbofan", "nocrankshaft"])
FAST_VARIANTS = set(["default", "turbofan"])
STANDARD_VARIANT = set(["default"])
class VariantGenerator(object):
  """Selects which testing variants and flag sets apply to a test case."""

  def __init__(self, suite, variants):
    self.suite = suite
    # Restrict each known variant set to the variants the runner requested.
    self.all_variants = ALL_VARIANTS & variants
    self.fast_variants = FAST_VARIANTS & variants
    self.standard_variant = STANDARD_VARIANT & variants

  def FilterVariantsByTest(self, testcase):
    """Return the variant set implied by the test's status-file outcomes."""
    outcomes = testcase.outcomes
    if outcomes:
      if statusfile.OnlyStandardVariant(outcomes):
        return self.standard_variant
      if statusfile.OnlyFastVariants(outcomes):
        return self.fast_variants
    return self.all_variants

  def GetFlagSets(self, testcase, variant):
    """Return the flag sets for the given variant of this test."""
    outcomes = testcase.outcomes
    fast_only = bool(outcomes) and statusfile.OnlyFastVariants(outcomes)
    flag_table = FAST_VARIANT_FLAGS if fast_only else ALL_VARIANT_FLAGS
    return flag_table[variant]
class TestSuite(object):
  """Base class describing a suite of tests plus its status-file rules."""

  @staticmethod
  def LoadTestSuite(root):
    """Loads the suite from <root>/testcfg.py, or falls back to gtest."""
    name = root.split(os.path.sep)[-1]
    f = None
    try:
      (f, pathname, description) = imp.find_module("testcfg", [root])
      module = imp.load_module("testcfg", f, pathname, description)
      return module.GetSuite(name, root)
    except:
      # Use default if no testcfg is present.
      return GoogleTestSuite(name, root)
    finally:
      if f:
        f.close()

  def __init__(self, name, root):
    self.name = name  # string
    self.root = root  # string containing path
    self.tests = None  # list of TestCase objects
    self.rules = None  # dictionary mapping test path to list of outcomes
    self.wildcards = None  # dictionary mapping test paths to list of outcomes
    self.total_duration = None  # float, assigned on demand

  def shell(self):
    # Default executable; overridden by subclasses.
    return "d8"

  def suffix(self):
    return ".js"

  def status_file(self):
    return "%s/%s.status" % (self.root, self.name)

  # Used in the status file and for stdout printing.
  def CommonTestName(self, testcase):
    if utils.IsWindows():
      # Status files use forward slashes even on Windows.
      return testcase.path.replace("\\", "/")
    else:
      return testcase.path

  def ListTests(self, context):
    raise NotImplementedError

  def _VariantGeneratorFactory(self):
    """The variant generator class to be used."""
    return VariantGenerator

  def CreateVariantGenerator(self, variants):
    """Return a generator for the testing variants of this suite.
    Args:
      variants: List of variant names to be run as specified by the test
                runner.
    Returns: An object of type VariantGenerator.
    """
    return self._VariantGeneratorFactory()(self, set(variants))

  def DownloadData(self):
    # Hook for suites that need external data; default is a no-op.
    pass

  def ReadStatusFile(self, variables):
    (self.rules, self.wildcards) = \
        statusfile.ReadStatusFile(self.status_file(), variables)

  def ReadTestCases(self, context):
    self.tests = self.ListTests(context)

  @staticmethod
  def _FilterFlaky(flaky, mode):
    # mode "run": drop flaky tests; mode "skip": keep only flaky tests.
    return (mode == "run" and not flaky) or (mode == "skip" and flaky)

  @staticmethod
  def _FilterSlow(slow, mode):
    return (mode == "run" and not slow) or (mode == "skip" and slow)

  @staticmethod
  def _FilterPassFail(pass_fail, mode):
    return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)

  def FilterTestCasesByStatus(self, warn_unused_rules,
                              flaky_tests="dontcare",
                              slow_tests="dontcare",
                              pass_fail_tests="dontcare"):
    """Applies exact and wildcard status-file rules to self.tests."""
    filtered = []
    used_rules = set()
    for t in self.tests:
      flaky = False
      slow = False
      pass_fail = False
      testname = self.CommonTestName(t)
      if testname in self.rules:
        used_rules.add(testname)
        # Even for skipped tests, as the TestCase object stays around and
        # PrintReport() uses it.
        t.outcomes = self.rules[testname]
        if statusfile.DoSkip(t.outcomes):
          continue  # Don't add skipped tests to |filtered|.
        for outcome in t.outcomes:
          if outcome.startswith('Flags: '):
            t.flags += outcome[7:].split()
        flaky = statusfile.IsFlaky(t.outcomes)
        slow = statusfile.IsSlow(t.outcomes)
        pass_fail = statusfile.IsPassOrFail(t.outcomes)
      skip = False
      for rule in self.wildcards:
        assert rule[-1] == '*'
        if testname.startswith(rule[:-1]):
          used_rules.add(rule)
          t.outcomes |= self.wildcards[rule]
          if statusfile.DoSkip(t.outcomes):
            skip = True
            break  # "for rule in self.wildcards"
          flaky = flaky or statusfile.IsFlaky(t.outcomes)
          slow = slow or statusfile.IsSlow(t.outcomes)
          pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
      if (skip or self._FilterFlaky(flaky, flaky_tests)
          or self._FilterSlow(slow, slow_tests)
          or self._FilterPassFail(pass_fail, pass_fail_tests)):
        continue  # "for t in self.tests"
      filtered.append(t)
    self.tests = filtered
    if not warn_unused_rules:
      return
    for rule in self.rules:
      if rule not in used_rules:
        print("Unused rule: %s -> %s" % (rule, self.rules[rule]))
    for rule in self.wildcards:
      if rule not in used_rules:
        print("Unused rule: %s -> %s" % (rule, self.wildcards[rule]))

  def FilterTestCasesByArgs(self, args):
    """Filter test cases based on command-line arguments.
    An argument with an asterisk in the end will match all test cases
    that have the argument as a prefix. Without asterisk, only exact matches
    will be used with the exception of the test-suite name as argument.
    """
    filtered = []
    globs = []
    exact_matches = []
    for a in args:
      argpath = a.split('/')
      if argpath[0] != self.name:
        continue
      if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
        return  # Don't filter, run all tests in this suite.
      path = '/'.join(argpath[1:])
      if path[-1] == '*':
        path = path[:-1]
        globs.append(path)
      else:
        exact_matches.append(path)
    for t in self.tests:
      for a in globs:
        if t.path.startswith(a):
          filtered.append(t)
          break
      for a in exact_matches:
        if t.path == a:
          filtered.append(t)
          break
    self.tests = filtered

  def GetFlagsForTestCase(self, testcase, context):
    raise NotImplementedError

  def GetSourceForTest(self, testcase):
    return "(no source available)"

  def IsFailureOutput(self, output, testpath):
    return output.exit_code != 0

  def IsNegativeTest(self, testcase):
    return False

  def HasFailed(self, testcase):
    execution_failed = self.IsFailureOutput(testcase.output, testcase.path)
    if self.IsNegativeTest(testcase):
      # A negative test is expected to fail, so success is the failure.
      return not execution_failed
    else:
      return execution_failed

  def GetOutcome(self, testcase):
    if testcase.output.HasCrashed():
      return statusfile.CRASH
    elif testcase.output.HasTimedOut():
      return statusfile.TIMEOUT
    elif self.HasFailed(testcase):
      return statusfile.FAIL
    else:
      return statusfile.PASS

  def HasUnexpectedOutput(self, testcase):
    outcome = self.GetOutcome(testcase)
    return not outcome in (testcase.outcomes or [statusfile.PASS])

  def StripOutputForTransmit(self, testcase):
    # Only transmit stdout/stderr when the outcome was unexpected.
    if not self.HasUnexpectedOutput(testcase):
      testcase.output.stdout = ""
      testcase.output.stderr = ""

  def CalculateTotalDuration(self):
    self.total_duration = 0.0
    for t in self.tests:
      self.total_duration += t.duration
    return self.total_duration
class StandardVariantGenerator(VariantGenerator):
  """Variant generator that runs every test only in the standard variant."""

  def FilterVariantsByTest(self, testcase):
    # Ignore status-file outcomes; always use the "default" variant set.
    return self.standard_variant
class GoogleTestSuite(TestSuite):
  """Test suite that discovers its tests by querying a gtest binary."""

  def __init__(self, name, root):
    super(GoogleTestSuite, self).__init__(name, root)

  def ListTests(self, context):
    """Runs the shell with --gtest_list_tests and parses the output.
    Raises:
      Exception: If the test executable fails to enumerate its tests.
    """
    shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
    if utils.IsWindows():
      shell += ".exe"
    output = commands.Execute(context.command_prefix +
                              [shell, "--gtest_list_tests"] +
                              context.extra_flags)
    if output.exit_code != 0:
      # Fixed: these were Python-2-only print statements, inconsistent with
      # the print() calls used elsewhere in this file; the parenthesized
      # single-argument form behaves identically on Python 2 and 3.
      print(output.stdout)
      print(output.stderr)
      raise Exception("Test executable failed to list the tests.")
    tests = []
    test_case = ''
    for line in output.stdout.splitlines():
      test_desc = line.strip().split()[0]
      if test_desc.endswith('.'):
        # Lines ending in '.' name a test case (group); the indented lines
        # that follow name the tests inside that group.
        test_case = test_desc
      elif test_case and test_desc:
        test = testcase.TestCase(self, test_case + test_desc, dependency=None)
        tests.append(test)
    tests.sort()
    return tests

  def GetFlagsForTestCase(self, testcase, context):
    return (testcase.flags + ["--gtest_filter=" + testcase.path] +
            ["--gtest_random_seed=%s" % context.random_seed] +
            ["--gtest_print_time=0"] +
            context.mode_flags)

  def _VariantGeneratorFactory(self):
    return StandardVariantGenerator

  def shell(self):
    # The gtest binary is named after the suite (unlike the default "d8").
    return self.name
|
{
"content_hash": "451b8785c11fbe734a6af4fbdea335f4",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 78,
"avg_line_length": 31.455737704918032,
"alnum_prop": 0.6394621638524077,
"repo_name": "dreamllq/node",
"id": "e0fff0d11a3d176b3faaac43819e827c552fea0b",
"size": "11167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deps/v8/tools/testrunner/local/testsuite.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11727"
},
{
"name": "C",
"bytes": "349188"
},
{
"name": "C++",
"bytes": "989660"
},
{
"name": "D",
"bytes": "21769"
},
{
"name": "DTrace",
"bytes": "15661"
},
{
"name": "Groff",
"bytes": "12642"
},
{
"name": "HTML",
"bytes": "163390"
},
{
"name": "JavaScript",
"bytes": "2638550"
},
{
"name": "Makefile",
"bytes": "17011"
},
{
"name": "PHP",
"bytes": "307541"
},
{
"name": "Python",
"bytes": "41656"
},
{
"name": "R",
"bytes": "3272"
},
{
"name": "Ruby",
"bytes": "2264"
},
{
"name": "Shell",
"bytes": "5907"
}
],
"symlink_target": ""
}
|
import falcon
import json
import logging
import time
from kafka import client
from kafka import common
from kafka import producer
from monasca.common import monasca_app
from monasca.common import utils
LOG = logging.getLogger(__name__)
@monasca_app.Resourcify
class KafkaDispatcher(object):
def __init__(self, conf):
LOG.debug('initializing KafkaDispatcher!')
self.conf = conf
self.drop_data = utils.conf_val(conf, 'drop_data', False)
self.topic = conf.get("kafka.topic", "monasca")
self.max_retry = utils.conf_val(conf, "kafka.max_retry", 1)
self.async = utils.conf_val(conf, "kafka.async", True)
self.compact = utils.conf_val(conf, "kafka.compact", True)
self.client = None
self.producer = None
self.kafka_server = conf.get("kafka.uri")
if self.kafka_server:
self._init_kafka()
def _init_kafka(self):
if self.kafka_server:
try:
if self.client:
self.client.reinit()
else:
self.client = client.KafkaClient(self.kafka_server)
self.producer = producer.SimpleProducer(self.client,
async=self.async,
ack_timeout=20)
LOG.debug("Successfully connected to Kafka server!")
except (common.KafkaUnavailableError, AttributeError,
Exception):
LOG.exception('Error occurred while connecting to Kafka.')
else:
LOG.error("Kafka server is not configured. Please use the "
"parameter kafka.uri, kafka.topic, kafka.max_retry to "
"configure kafka. Restart the server once it is "
"configured.")
@monasca_app.Restify(path='/v2.0/metrics', method='get')
def on_get_metrics(self, req, res):
res.body = 'Just some thing from kafka dispatcher here'
LOG.debug('Get request received.')
@monasca_app.Restify(path='/v2.0/metrics', method='post')
def on_post_metrics(self, req, res):
msg = ''
while True:
chunk = req.stream.read(1024)
if not chunk:
break
msg = msg.join(chunk)
if msg:
if self.drop_data:
res.status = falcon.HTTP_204
else:
for i in range(0, self.max_retry):
try:
if self.compact:
self.producer.send_messages(self.topic, msg)
else:
LOG.debug('Raw message: %s' % msg)
data = json.loads(msg)
if isinstance(data, list):
LOG.debug('%s Message(s) in the request' %
len(data))
for item in data:
self.producer.send_messages(
self.topic, json.dumps(item))
else:
self.producer.send_messages(self.topic, msg)
res.status = falcon.HTTP_204
LOG.debug('Message posted successfully.')
break
except ValueError:
LOG.exception('Message is not valid json.')
res.status = falcon.HTTP_406
break
except (common.KafkaUnavailableError, AttributeError,
Exception):
LOG.exception('Error occurred while posting data to '
'Kafka.')
res.status = falcon.HTTP_503
time.sleep(0.1)
self._init_kafka()
|
{
"content_hash": "4841815aca7225e3815ee4f42fbf3791",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 77,
"avg_line_length": 38.62135922330097,
"alnum_prop": 0.48366013071895425,
"repo_name": "litong01/python-monasca-api",
"id": "308a7cd19d1e5326cb17f4e867565fe76adf3a15",
"size": "4594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monasca/dispatcher/kafka_queue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21335"
}
],
"symlink_target": ""
}
|
import ROOT
import array
def set_ticks(can):
    """Draw tick marks on the upper and right axes of the canvas.

    Args:
        can (ROOT.TCanvas): Canvas to set ticks on.
    """
    for enable_ticks in (can.SetTickx, can.SetTicky):
        enable_ticks(1)
def root_style(font=132):
    """ Sets the style for ROOT plots. The SNO+ standard style is adapted
    from a .C sent to collaboration.

    Args:
        font (int): Integer denoting the font style for plots. Default is 132.
          See https://root.cern.ch/root/html/TAttText.html for details.
    """
    style = ROOT.TStyle("clearRetro", "HIP plots style for publications")
    # Plain black-on-white colour scheme.
    style.SetFrameBorderMode(0)
    style.SetCanvasBorderMode(0)
    style.SetPadBorderMode(0)
    style.SetPadBorderSize(0)
    style.SetPadColor(0)
    style.SetCanvasColor(0)
    style.SetTitleColor(0)
    style.SetStatColor(0)
    # Bold lines.
    style.SetHistLineWidth(2)
    style.SetLineWidth(2)
    # No title, stats box or fit by default.
    style.SetOptTitle(0)
    style.SetOptStat(0)
    style.SetOptFit(0)
    # Postscript dashes.
    style.SetLineStyleString(2, "[12 12]")  # postscript dashes
    # Text style and size.
    style.SetTextFont(font)
    style.SetTextSize(0.24)
    axes = ("x", "y", "z")
    for axis in axes:
        style.SetLabelFont(font, axis)
    for axis in axes:
        style.SetTitleFont(font, axis)
    style.SetLegendFont(font)
    # Per-axis label/title sizes and colours.
    for axis in axes:
        style.SetLabelSize(0.04, axis)
        style.SetTitleSize(0.05, axis)
        style.SetTitleColor(1, axis)
    # Axis title offsets.
    for axis in axes:
        style.SetTitleOffset(0.8, axis)
    # Legends.
    style.SetLegendBorderSize(1)
    # Graphs - default marker is a cross, rather than a dot.
    style.SetMarkerStyle(2)  # cross +
    # Five-stop colour gradient palette with 255 contour levels.
    n_stops = 5
    n_contours = 255
    stops = array.array('d', [0.00, 0.34, 0.61, 0.84, 1.00])
    red = array.array('d', [0.00, 0.00, 0.87, 1.00, 0.51])
    green = array.array('d', [0.00, 0.81, 1.00, 0.20, 0.00])
    blue = array.array('d', [0.51, 1.00, 0.12, 0.00, 0.00])
    ROOT.TColor.CreateGradientColorTable(n_stops, stops, red, green, blue,
                                         n_contours)
    style.SetNumberContours(n_contours)
    ROOT.gROOT.SetStyle("clearRetro")
|
{
"content_hash": "d03bdbac7d87e749508d4127b6d8d72d",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 79,
"avg_line_length": 29.770114942528735,
"alnum_prop": 0.6613899613899614,
"repo_name": "jwaterfield/echidna",
"id": "75b529d5ac843787a8c60ed2dda58814feaff56f",
"size": "2590",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "echidna/output/root_style.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "25098"
},
{
"name": "Python",
"bytes": "577666"
},
{
"name": "Smarty",
"bytes": "1522"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
import MMTK
from AlGDock.BAT import *
def _in_ring(ancestors):
  """
  Returns a list of atoms in the same circular path as ancestors,
  which can be a single atom or list of atoms.

  Performs a depth-first walk over bonds starting from the last atom in
  ancestors; a ring is detected when the walk returns to ancestors[0].
  """
  # Accept a single atom as a starting point.
  if isinstance(ancestors,MMTK.ChemicalObjects.Atom):
    ancestors = [ancestors]
  in_ring = []
  # Visit neighbours of the most recent atom, excluding atoms already on the
  # path (ancestors[0] is deliberately NOT excluded so the cycle can close).
  for child in [a for a in ancestors[-1].bondedTo() if a not in ancestors[1:]]:
    if child==ancestors[0]:
      if len(ancestors)>2:
        # Closed a cycle of length > 2: the whole path is a ring.
        return ancestors
      else:
        # Going straight back along the starting bond is not a ring.
        return []
    in_ring += _in_ring(ancestors+[child])
  if len(in_ring)>0:
    return in_ring
  return []
def _join_sets(sets):
  """
  Join together sets that have intersecting elements
  :param sets: a list of sets
  :returns: a list of pairwise-disjoint sets covering the same elements

  Fixed: the original removed and appended entries of the list it was
  currently iterating over, which is undefined-behavior territory in Python
  (items can be skipped or revisited). This version repeats clean merge
  passes until a pass performs no merge, which is the same fixed point.
  """
  sets = list(sets)
  merged_any = True
  while merged_any:
    merged_any = False
    output_sets = []
    for new_set in sets:
      # Find the first already-collected set sharing an element.
      target = None
      for old_set in output_sets:
        if new_set & old_set:
          target = old_set
          break
      if target is None:
        output_sets.append(set(new_set))
      else:
        # Replace the partner with its union, appended at the end to
        # mirror the original's ordering behavior.
        output_sets.remove(target)
        output_sets.append(target | new_set)
        merged_any = True
    sets = output_sets
  return sets
def _sets_are_unique(sets):
  """
  Returns True if each set has no intersecting elements with every other set in the list
  :param sets: a list of sets
  """
  for first_index, current in enumerate(sets):
    # Only compare against later entries; earlier pairs were already checked.
    for other in sets[first_index + 1:]:
      if current & other:
        return False
  return True
class identifier(converter):
  """
  Identifies rings, rigid bodies, soft torsions
  """
  def __init__(self, universe, molecule):
    self.universe = universe
    self.molecule = molecule
    # Identify all unique rings
    unique_paths = []
    for ring in [set(_in_ring(a)) for a in self.molecule.atoms]:
      if ring!=set() and (not ring in unique_paths):
        unique_paths.append(ring)
    # Fused rings (sharing atoms) are merged; sort smallest ring first.
    self.rings = sorted(_join_sets(unique_paths), key=lambda r:len(r))
    # Rigid bodies also include terminal atoms adjacent to ring atoms
    # TO DO: include planar systems
    rigid_bodies = []
    for index in range(len(self.rings)):
      attached_terminal_atoms = set(\
        [a for a in self.molecule.atoms \
          if len(a.bondedTo())==1 and a.bondedTo()[0] in self.rings[index]])
      rigid_bodies.append(self.rings[index].union(attached_terminal_atoms))
    self.rigid_bodies = sorted(_join_sets(rigid_bodies), key=lambda b:len(b))
    # Choose terminal atom attached to the largest ring as the initial atom
    # NOTE(review): this reuses attached_terminal_atoms from the LAST loop
    # iteration above (i.e. the terminal atoms of the last/largest ring); it
    # is only defined when at least one ring exists, hence the guard below.
    if len(self.rings)>0:
      attached_terminal_atoms = sorted(list(attached_terminal_atoms), \
        key=lambda atom:atom.mass())
      self._converter_setup(
        initial_atom=attached_terminal_atoms[-1])
    else:
      self._converter_setup()
    # Select soft torsions, primary torsions that are not in the same rigid body
    softTorsionInd = []
    for torsion_ind in set(self._firstTorsionInd):
      # body_id[1] and body_id[2] hold the rigid-body index of the two
      # central torsion atoms, or a negative sentinel when not in any body.
      body_id = [-1, -1, -1]
      for atom_ind in [1,2]:
        in_bodies = [self._torsionL[torsion_ind][atom_ind] in rigid_body \
          for rigid_body in rigid_bodies]
        if True in in_bodies:
          body_id[atom_ind] = in_bodies.index(True)
        else:
          # Distinct negative sentinels so two free atoms never compare equal.
          body_id[atom_ind] = -atom_ind
      if body_id[1] != body_id[2]:
        softTorsionInd.append(torsion_ind)
    self._softTorsionInd = softTorsionInd
  def setOccupancyTo(self, property='rings'):
    # Writes a 1-based group index into each atom's occupancy field
    # (0 for atoms in no group), useful for visualisation.
    for atom in self.molecule.atoms:
      atom.occupancy = 0
    if property=='rings' or property=='rigid_bodies':
      for index in range(len(self.rings)):
        for atom in getattr(self, property)[index]:
          atom.occupancy = index+1
    elif property=='soft_torsions':
      for index in self._softTorsionInd:
        for atom in self._torsionL[index]:
          atom.occupancy = index+1
########
# MAIN #
########
# Demo / visual check (Python 2 script): loads one or more MMTK ligand
# databases, identifies soft torsions, rotates a randomly chosen soft
# torsion through a full turn, and displays the conformations colored by
# occupancy.
if __name__ == '__main__':
  import argparse
  parser = argparse.ArgumentParser(
    description='Bond-Angle-Torsion converter')
  parser.add_argument('--database',
    help='MMTK database that describes the molecule of interest',
    default='ligand.db')
  args = parser.parse_args()
  import os.path
  # '--database all' processes every */*.db; 'random' picks one of them.
  if args.database=='all':
    import glob
    dbFNs = glob.glob('*/*.db')
  elif args.database=='random':
    import glob
    from random import randrange
    dbFNs = glob.glob('*/*.db')
    dbFNs = [dbFNs[randrange(len(dbFNs))]]
  elif os.path.isfile(args.database):
    dbFNs = [args.database]
  else:
    raise Exception('Database file %s not found'%args.database)
  # All databases must live in one directory; the MMTK database directory
  # is set once and changing it mid-run is rejected below.
  dirname = os.path.dirname(os.path.abspath(dbFNs[0]))
  for FN in dbFNs:
    print 'Loading', FN
    dbFN = os.path.abspath(FN)
    if os.path.dirname(dbFN)!=dirname:
      raise Exception('Cannot change ligand directory in MMTK')
    MMTK.Database.molecule_types.directory = os.path.dirname(dbFN)
    universe = MMTK.Universe.InfiniteUniverse()
    molecule = MMTK.Molecule(os.path.basename(dbFN))
    universe.addObject(molecule)
    self = identifier(universe, molecule)
    print 'There are %d unique rings'%len(self.rings)
    self.setOccupancyTo('soft_torsions')
    # This rotates the last primary torsion
    BAT = self.BAT(extended=True)
    from random import randrange
    torsion_ind = self._softTorsionInd[randrange(len(self._softTorsionInd))]
    # Torsion coordinates sit at the tail of the extended BAT vector.
    BAT_ind = len(BAT)-self.ntorsions+torsion_ind
    confs = []
    for torsion_offset in np.linspace(0,2*np.pi):
      BAT_n = [BAT[ind] if ind!=BAT_ind else BAT[ind] + torsion_offset \
        for ind in range(len(BAT))]
      self.Cartesian(BAT_n)
      confs.append(np.copy(self.universe.configuration().array))
    import AlGDock.IO
    # Write the trajectory, show it, then clean up the temporary file.
    IO_dcd = AlGDock.IO.dcd(molecule)
    IO_dcd.write('rotation.dcd', confs)
    self.showMolecule(dcdFN='rotation.dcd', colorBy='Occupancy')
    os.remove('rotation.dcd')
|
{
"content_hash": "b2e5018b9e8e03db51aebcf88ab89557",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 88,
"avg_line_length": 32.201117318435756,
"alnum_prop": 0.6469465648854962,
"repo_name": "gkumar7/AlGDock",
"id": "59c41be7f75124e1a81fdbeda7419eadde8e7a3f",
"size": "5764",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "AlGDock/RigidBodies.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "127550"
},
{
"name": "CSS",
"bytes": "2136"
},
{
"name": "CoffeeScript",
"bytes": "13826"
},
{
"name": "JavaScript",
"bytes": "240611"
},
{
"name": "Mathematica",
"bytes": "9061"
},
{
"name": "Python",
"bytes": "834939"
},
{
"name": "Shell",
"bytes": "10278"
}
],
"symlink_target": ""
}
|
import player
import random
class Monster(player.Player):
    """A wandering NPC that attacks any active Player sharing its room."""

    def __init__(self, game, location, name, description):
        player.Player.__init__(self, game, location)
        self.name = name
        self.description = description

    def get_input(self):
        """Return this monster's next command string for the game loop."""
        if not self.playing:
            return ""
        # Attack the first active human player found in the current room.
        # The exact-class check deliberately excludes Monster instances.
        targets = [occupant for occupant in self.location.here
                   if occupant.__class__ == player.Player and occupant.playing]
        if targets:
            return "attack " + targets[0].name
        # Otherwise wander through a random exit half of the time.
        if random.choice((0, 1)):
            return "go " + random.choice(self.location.exits())
        return ""

    def look(self, player, noun):
        """Describe this monster to an examining player."""
        return [self.name, self.description]

    def get(self, player, noun):
        """Monsters cannot be picked up."""
        return ["The " + self.name + " growls at you."]

    def send_results(self):
        """Monsters have no client connection, so output is discarded."""
        pass
if __name__ == '__main__':
    # Smoke test (Python 2): build a monster in an empty cave and poke at
    # the handler machinery inherited from player.Player.
    import cave
    cave1 = cave.Cave('Empty cave')
    # NOTE(review): Monster.__init__ takes (game, location, name,
    # description) but only three arguments are supplied here, so `cave1`
    # binds to `game`, 'orc' to `location`, and `description` is missing --
    # this looks like it would raise TypeError; confirm.
    orc = Monster(cave1, 'orc', 'A generic dungeon monster')
    print orc.actions
    print orc.update_debug()
    print orc.find_handler('go', 'north')
    raw_input("Hit enter to continue...")
|
{
"content_hash": "ebf30767f2226ee416307073b9ef3db4",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 72,
"avg_line_length": 29.127659574468087,
"alnum_prop": 0.5522279035792549,
"repo_name": "geekbert/HelloPythonSourceCode",
"id": "24a577f1b9e67ab80c0716204702bf003dd86f9e",
"size": "1370",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chapter 10/mud-4/monster.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "238138"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url, include
try:
import importlib
except ImportError:
from django.utils import importlib
from allauth.socialaccount import providers
from . import app_settings
# Account (signup/login/password) URLs are always mounted at the root.
urlpatterns = [url('^', include('allauth.account.urls'))]
if app_settings.SOCIALACCOUNT_ENABLED:
    urlpatterns += [url('^social/', include('allauth.socialaccount.urls'))]
# Append each registered social provider's URL patterns; a provider that
# does not ship a `urls` module is skipped silently.
for provider in providers.registry.get_list():
    try:
        prov_mod = importlib.import_module(provider.package + '.urls')
    except ImportError:
        continue
    prov_urlpatterns = getattr(prov_mod, 'urlpatterns', None)
    if prov_urlpatterns:
        urlpatterns += prov_urlpatterns
|
{
"content_hash": "9109a441ee1fdc35a283db425059339f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 75,
"avg_line_length": 28,
"alnum_prop": 0.7157738095238095,
"repo_name": "sih4sing5hong5/django-allauth",
"id": "e85d05dc129fdcd5d6f9c0630a31eb787d83cdc5",
"size": "672",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "allauth/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "42612"
},
{
"name": "JavaScript",
"bytes": "3967"
},
{
"name": "Python",
"bytes": "610210"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the Alumno and Imagew models.

    The ForeignKey ``Imagew.service`` is dropped before ``Imagew`` itself
    so the model can be deleted cleanly.
    """
    dependencies = [
        ('venta', '0054_alumno'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Alumno',
        ),
        migrations.RemoveField(
            model_name='imagew',
            name='service',
        ),
        migrations.DeleteModel(
            name='Imagew',
        ),
    ]
|
{
"content_hash": "dc2d0f75f2ef6d1ac7394d13dd536fe5",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 39,
"avg_line_length": 19.434782608695652,
"alnum_prop": 0.5324384787472036,
"repo_name": "vpadillar/pventa",
"id": "d6018ac285129efe140e7c48e7a8edbab8ff7d3b",
"size": "519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venta/migrations/0055_auto_20161106_0819.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "491"
},
{
"name": "CSS",
"bytes": "87140"
},
{
"name": "Groff",
"bytes": "76"
},
{
"name": "HTML",
"bytes": "47212"
},
{
"name": "JavaScript",
"bytes": "177804"
},
{
"name": "Python",
"bytes": "201594"
},
{
"name": "SQLPL",
"bytes": "1006"
}
],
"symlink_target": ""
}
|
"""The arguments helper for a database configuration."""
from plaso.lib import errors
from plaso.cli.helpers import interface
from plaso.cli.helpers import server_config
class DatabaseConfigHelper(interface.ArgumentsHelper):
  """CLI arguments helper class for database configuration."""
  NAME = u'database_config'
  DESCRIPTION = u'Argument helper for a database configuration.'
  # Fallback values applied when the corresponding option is unset or empty.
  _DEFAULT_NAME = u'data'
  _DEFAULT_PASSWORD = u'toor'
  _DEFAULT_USERNAME = u'root'
  @classmethod
  def AddArguments(cls, argument_group):
    """Add command line arguments the helper supports to an argument group.

    This function takes an argument parser or an argument group object and
    adds to it all the command line arguments this helper supports.

    Args:
      argument_group: the argparse group (instance of argparse._ArgumentGroup
                      or argparse.ArgumentParser).
    """
    argument_group.add_argument(
        u'--user', dest=u'username', type=unicode, action=u'store',
        default=None, metavar=u'USERNAME', required=False, help=(
            u'The username used to connect to the database.'))
    argument_group.add_argument(
        u'--password', dest=u'password', type=unicode, action=u'store',
        default=None, metavar=u'PASSWORD', help=(
            u'The password for the database user.'))
    argument_group.add_argument(
        u'--db_name', '--db-name', dest=u'db_name', action=u'store',
        type=unicode, default=None, required=False, help=(
            u'The name of the database to connect to.'))
    # Also expose the shared server options handled by the base helper.
    server_config.BaseServerConfigHelper.AddArguments(argument_group)
  @classmethod
  def ParseOptions(cls, options, output_module):
    """Parses and validates options.

    Args:
      options: the parser option object (instance of argparse.Namespace).
      output_module: an output module (instance of OutputModule).

    Raises:
      BadConfigObject: when the output module object is of the wrong type.
      BadConfigOption: when a configuration parameter fails validation.
    """
    if not hasattr(output_module, u'SetCredentials'):
      raise errors.BadConfigObject(u'Unable to set username information.')
    # Falsy values (None or an empty string) fall back to the defaults.
    username = getattr(options, u'username', None)
    if not username:
      username = cls._DEFAULT_USERNAME
    password = getattr(options, u'password', None)
    if not password:
      password = cls._DEFAULT_PASSWORD
    name = getattr(options, u'db_name', None)
    if not name:
      name = cls._DEFAULT_NAME
    output_module.SetCredentials(
        username=username, password=password)
    output_module.SetDatabaseName(name)
    server_config.BaseServerConfigHelper.ParseOptions(options, output_module)
|
{
"content_hash": "76b519e0186f5be54364804568952efb",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 80,
"avg_line_length": 36.41891891891892,
"alnum_prop": 0.6975881261595547,
"repo_name": "8u1a/plaso",
"id": "67cc0a902cb0a71d4b6719a1d305d1b87223d947",
"size": "2719",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "plaso/cli/helpers/database_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1276"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Protocol Buffer",
"bytes": "13930"
},
{
"name": "Python",
"bytes": "3179107"
},
{
"name": "Shell",
"bytes": "47305"
}
],
"symlink_target": ""
}
|
import os
import unittest
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.dataset_driver import ParticleDataHandler
from mi.dataset.driver.cg_cpm_eng.cpm.cg_cpm_eng_cpm_recovered_driver import parse
from mi.dataset.driver.cg_cpm_eng.cpm.resource import RESOURCE_PATH
__author__ = 'mworden'
log = get_logger()
@attr('UNIT', group='mi')
class DriverTest(unittest.TestCase):
    """Unit test for the cg_cpm_eng_cpm recovered dataset driver."""

    def test_one(self):
        """Parse a sample CPM status file and verify the parser reports no failure."""
        source_file_path = os.path.join(RESOURCE_PATH, 'cpm_status.20140817_1255.txt')
        particle_data_handler = ParticleDataHandler()
        particle_data_handler = parse(None, source_file_path, particle_data_handler)
        log.debug("SAMPLES: %s", particle_data_handler._samples)
        log.debug("FAILURE: %s", particle_data_handler._failure)
        # assertEquals is a deprecated alias of assertEqual; use the
        # canonical name.
        self.assertEqual(particle_data_handler._failure, False)
if __name__ == '__main__':
    # Allow running this single test directly, without nose/pytest.
    test = DriverTest('test_one')
    test.test_one()
|
{
"content_hash": "cf53c472b3ecc8ed4458affb41685c66",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 86,
"avg_line_length": 28.78787878787879,
"alnum_prop": 0.7073684210526315,
"repo_name": "oceanobservatories/mi-dataset",
"id": "69f48051c4b831b2a7e6b5f0e2e721b98942d7a2",
"size": "973",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mi/dataset/driver/cg_cpm_eng/cpm/test/test_cg_cpm_eng_cpm_recovered_driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "3624358"
}
],
"symlink_target": ""
}
|
from django.utils.encoding import smart_str
from django.conf import settings
from hashlib import sha1
from mediagenerator.generators.bundles.base import Filter
from mediagenerator.utils import find_file, read_text_file, get_media_dirs
from subprocess import Popen, PIPE
import os
import sys
import re
import posixpath
_RE_FLAGS = re.MULTILINE | re.UNICODE
multi_line_comment_re = re.compile(r'/\*.*?\*/', _RE_FLAGS | re.DOTALL)
one_line_comment_re = re.compile(r'//.*', _RE_FLAGS)
import_re = re.compile(r'''@import\s* # import keyword
["'] # opening quote
(.+?) # the module name
["'] # closing quote
\s*; # statement terminator
''',
_RE_FLAGS | re.VERBOSE)
if not hasattr(os.path, 'relpath'):
    # backport os.path.relpath from Python 2.6
    # Copyright (c) 2001-2010 Python Software Foundation; All Rights Reserved
    # This is a verbatim stdlib backport that monkeypatches os.path on
    # Python < 2.6; kept unmodified on purpose to match upstream.
    # Return the longest prefix of all list elements.
    def commonprefix(m):
        "Given a list of pathnames, returns the longest common leading component"
        if not m: return ''
        s1 = min(m)
        s2 = max(m)
        for i, c in enumerate(s1):
            if c != s2[i]:
                return s1[:i]
        return s1
    def relpath(path, start=os.path.curdir):
        """Return a relative version of a path"""
        if not path:
            raise ValueError("no path specified")
        start_list = [x for x in os.path.abspath(start).split(os.path.sep) if x]
        path_list = [x for x in os.path.abspath(path).split(os.path.sep) if x]
        # Work out how much of the filepath is shared by start and path.
        i = len(commonprefix([start_list, path_list]))
        rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
        if not rel_list:
            return os.path.curdir
        return os.path.join(*rel_list)
    os.path.relpath = relpath
class Less(Filter):
    """Compiles a Less main module (and its @import graph) to CSS by
    shelling out to the ``lessc`` command-line compiler.

    The filter records the mtime of every transitively imported .less file
    and only recompiles when one of them changes.
    """
    takes_input = False

    def __init__(self, **kwargs):
        self.config(kwargs, path=(), main_module=None)
        if isinstance(self.path, basestring):
            self.path = (self.path,)
        # we need to be able to mutate self.path,
        self.path = list(self.path)
        super(Less, self).__init__(**kwargs)
        assert self.filetype == 'css', (
            'Less only supports compilation to CSS. '
            'The parent filter expects "%s".' % self.filetype)
        assert self.main_module, \
            'You must provide a main module'
        # lessc can't cope with nonexistent directories, so filter them
        media_dirs = [directory for directory in get_media_dirs()
                      if os.path.exists(directory)]
        self.path += tuple(media_dirs)
        # Compilation cache: output text, its sha1, and the
        # module-name -> mtime map used for change detection.
        self._compiled = None
        self._compiled_hash = None
        self._dependencies = {}

    @classmethod
    def from_default(cls, name):
        """Build the filter configuration for a bare module name."""
        return {'main_module': name}

    def get_output(self, variation):
        """Yield the production (non-debug) compiled CSS."""
        self._regenerate(debug=False)
        yield self._compiled

    def get_dev_output(self, name, variation):
        """Return the development-mode compiled CSS for the main module."""
        assert name == self.main_module + '.css'
        self._regenerate(debug=True)
        return self._compiled

    def get_dev_output_names(self, variation):
        """Yield the (output name, content hash) pair for dev serving."""
        self._regenerate(debug=True)
        yield self.main_module + '.css', self._compiled_hash

    def _regenerate(self, debug=False):
        """Recompile the main module if any tracked dependency changed.

        Walks the @import graph starting at ``main_module`` to rebuild the
        dependency/mtime map, then invokes lessc on the main module.
        """
        if self._dependencies:
            for name, mtime in self._dependencies.items():
                path = self._find_file(name)
                if not path or os.path.getmtime(path) != mtime:
                    # Just recompile everything
                    self._dependencies = {}
                    break
            else:
                # No changes
                return
        modules = [self.main_module]
        # get all the transitive dependencies of this module
        while True:
            if not modules:
                break
            module_name = modules.pop()
            path = self._find_file(module_name)
            assert path, 'Could not find the Less module %s' % module_name
            mtime = os.path.getmtime(path)
            self._dependencies[module_name] = mtime
            source = read_text_file(path)
            dependencies = self._get_dependencies(source)
            for name in dependencies:
                # Try relative import, first
                transformed = posixpath.join(posixpath.dirname(module_name), name)
                path = self._find_file(transformed)
                if path:
                    name = transformed
                else:
                    path = self._find_file(name)
                    assert path, ('The Less module %s could not find the '
                                  'dependency %s' % (module_name, name))
                if name not in self._dependencies:
                    modules.append(name)
        main_module_path = self._find_file(self.main_module)
        self._compiled = self._compile(main_module_path, debug=debug)
        self._compiled_hash = sha1(smart_str(self._compiled)).hexdigest()

    def _compile(self, path, debug=False):
        """Run lessc on ``path`` and return the compiled CSS text.

        NOTE(review): include paths are joined with ':' (the POSIX search
        path separator) even though shell=True is used on win32 -- confirm
        this works with lessc on Windows.
        """
        try:
            relative_paths = [self._get_relative_path(directory)
                              for directory in self.path]
            shell = sys.platform == 'win32'
            cmd = Popen(['lessc',
                         '--include-path=%s' % ':'.join(relative_paths),
                         path],
                        stdin=PIPE, stdout=PIPE, stderr=PIPE,
                        shell=shell, universal_newlines=True,
                        cwd=settings.PROJECT_ROOT)
            output, error = cmd.communicate()
            # some lessc errors output to stdout, so we put both in the assertion message
            assert cmd.wait() == 0, ('Less command returned bad '
                                     'result:\n%s\n%s' % (error, output))
            return output.decode('utf-8')
        except Exception, e:
            raise ValueError("Failed to run Less compiler for this "
                "file. Please confirm that the \"lessc\" application is "
                "on your path and that you can run it from your own command "
                "line.\n"
                "Error was: %s" % e)

    def _get_dependencies(self, source):
        """Return the non-.css module names @imported by ``source``."""
        clean_source = multi_line_comment_re.sub('\n', source)
        clean_source = one_line_comment_re.sub('', clean_source)
        return [name for name in import_re.findall(clean_source)
                if not name.endswith('.css')]

    def _find_file(self, name):
        """Locate a .less module on the search path (extension optional)."""
        if not name.endswith('.less'):
            name = name + '.less'
        return find_file(name, media_dirs=self.path)

    def _get_relative_path(self, abs_path):
        """Given an absolute path, return a path relative to the
        project root.

        >>> self._get_relative_path('/home/bob/bobs_project/subdir/foo')
        'subdir/foo'
        """
        relative_path = os.path.relpath(abs_path, settings.PROJECT_ROOT)
        return relative_path
|
{
"content_hash": "14febc8a570b67604e52766e5999028a",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 89,
"avg_line_length": 36.41836734693877,
"alnum_prop": 0.5539366769403195,
"repo_name": "stucchio/Django-Media-Generator",
"id": "4c10f3c7cfb831aa9e4d8539eb0a3210677eb6f5",
"size": "7138",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "mediagenerator/filters/less.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "80814"
},
{
"name": "Ruby",
"bytes": "2070"
}
],
"symlink_target": ""
}
|
import UserDict
import bson.objectid
from .codecs import Codec
class BaseModel(UserDict.IterableUserDict):
    """Dict-like document model base class.

    Subclasses declare ``fields``: a mapping of field name -> definition
    dict with a ``type`` entry and optional ``required``, ``default`` and
    ``codec`` entries. Values are type-checked on assignment and passed
    through the field's codec (if any) on the way in and out.
    """
    # NOTE(review): class-level attribute -- __setitem__ below mutates it
    # when auto-registering '_id', so the registration is shared by all
    # instances and subclasses; confirm this is intended.
    fields = {}
    def __init__(self, **kwargs):
        '''All this simple constructor does is populate the instance's
        field values from any of the provided keyword arguments.'''
        self.data = {}
        for k in kwargs:
            self.__setitem__(k, kwargs[k])
    def __getitem__(self, key):
        '''
        Returns the value of the requested field, and will decode the data in
        the field if there is a codec associated with the field.

        Raises KeyError for a field name not declared in ``fields`` and
        TypeError when the declared codec is not a pureodm Codec.
        '''
        # Make sure the key exists.
        if key not in self.fields:
            raise KeyError('No such field "{0}".'.format(key))
        # See if there is a codec associated with the field, and if so,
        # then decode the value on its way out.
        if 'codec' in self.fields[key] and self.fields[key]['codec'] is not None:
            codec = self.fields[key]['codec']()
            if isinstance(codec, Codec):
                if self.data[key] is not None:
                    return codec.decode(self.data[key])
            else:
                e = 'codec must be of type pureodm.codec.Codec, not {0}'
                raise TypeError(e.format(type(codec)))
        return self.data[key]
    def __setitem__(self, key, value):
        '''
        Validates the value being set, as well as passes it through a
        codec, if one is defined, for the field.

        Raises KeyError for unknown fields, TypeError for a bad '_id' value
        or codec, and ValueError on a field type mismatch.
        '''
        if key not in self.fields:
            # Auto-register an '_id' field the first time an ObjectId is
            # assigned; any other unknown key is rejected below.
            if key == '_id' and isinstance(value, bson.objectid.ObjectId):
                self.fields[key] = {'type': bson.objectid.ObjectId,
                                    'required': True}
                # NOTE(review): this recursive call already stores the value,
                # then execution continues below and stores it again --
                # harmless but redundant; confirm.
                self.__setitem__(key, value)
            elif key != '_id' and not isinstance(value, bson.objectid.ObjectId):
                e = 'Value must be of type bson.objectid.ObjectId.'
                raise TypeError(e)
            elif key not in self.fields:
                raise KeyError('No such field "{0}".'.format(key))
        # Check to see if the field has a codec associated with it, and if it
        # does, then encode the value.
        if 'codec' in self.fields[key] and self.fields[key]['codec'] is not None:
            codec = self.fields[key]['codec']()
            if isinstance(codec, Codec):
                if value is not None:
                    value = codec.encode(value)
            else:
                e = 'codec must be of type pureodm.codec.Codec, not {0}'
                raise TypeError(e.format(type(codec)))
        field_type = self.fields[key]['type']
        if isinstance(field_type, str) and isinstance(value, unicode):
            # Favour unicode, over regular strings.
            self.data[key] = value
        elif isinstance(field_type, unicode) and isinstance(value, str):
            # Upgrade to unicode, because reasons.
            self.data[key] = unicode(value)
        elif isinstance(field_type, list) and not isinstance(value, list):
            # The caller is trying to set the value of a list field to
            # something other than a list.
            #
            # For shame...
            #
            e = '"{0}" is a list field of {1}.'.format(key, field_type[0])
            raise ValueError(e)
        elif isinstance(field_type, list) and isinstance(value, list):
            # Ensure that each element in the list ``value`` matches the
            # sub-type of the list field.
            for i in value:
                if not isinstance(i, field_type[0]):
                    e = '"{0}" is meant to hold {1}, not {2}.'
                    raise ValueError(e.format(key, field_type[0], type(i)))
            self.data[key] = value
        elif isinstance(value, field_type):
            # We were not passed a list, but the value the caller wants to set
            # is of the same type as that in the key definition.
            #
            self.data[key] = value
        elif value is None:
            # In the event we are nullifying the value of a field, we want
            # this to pass.
            self.data[key] = None
        else:
            # Nope; type mismatch.
            #
            e = '{2}: Wanted {0}, got {1}.'
            raise ValueError(e.format(field_type, type(value), key))
    def __delitem__(self, key):
        '''Instead of removing the item from the dictionary, this method
        simply nullifies the value.

        To nullify the value, this method - in turn - just calls
        :meth:`self.__setitem__()` so that all of the type-checking is done
        in one place.
        '''
        # Remember, this is a cleanliness thing.
        self.__setitem__(key, None)
class EmbeddedModel(BaseModel):
    # Marker subclass: behaves exactly like BaseModel and adds nothing of
    # its own. The name suggests it is meant for documents nested inside
    # other documents, but nothing in this file distinguishes it -- verify
    # against callers.
    pass
class Model(BaseModel):
    """A top-level document model that can be saved to and loaded from a
    pymongo-style collection."""
    def save_to(self, collection, **kwargs):
        '''Does some remaining validation on the data in the model, then
        saves it.

        If ``collection`` is None, the computed document dict ("spec") is
        returned instead of being written.'''
        # Check to make sure an "_id" field was set. If one was not defined,
        # and/or it just never had a value assigned, do so now.
        #
        # NOTE(review): this mutates the class-level ``fields`` dict, so the
        # auto-registered '_id' definition is shared across all instances
        # and subclasses -- confirm this is intended.
        if '_id' not in self.fields:
            self.fields['_id'] = {
                'type': bson.objectid.ObjectId,
                'required': True
            }
            self.__setitem__('_id', bson.objectid.ObjectId())
        # Run through the fields, and see if any of them that have a "default"
        # callable set have been assigned a value, and if they haven't, then
        # call the callable.
        #
        for i in self.fields:
            if i not in self.data and 'default' in self.fields[i]:
                # No value has been set, but there is a default value.
                #
                self.__setitem__(i, self.fields[i]['default']())
        # Check to make sure none of the required fields have been left empty.
        #
        for i in self.fields:
            if i not in self.data and 'required' in self.fields[i]:
                if self.fields[i]['required']:
                    raise ValueError('"{0}" is a required field.'.format(i))
        # Now, generate an update spec.
        #
        spec = dict([(i, self.data[i]) for i in self.fields if i in self.data])
        # Do the update.
        #
        if collection is not None:
            collection.save(spec, **kwargs)
        else:
            return spec
    @classmethod
    def find_in(cls, collection, terms=None, **kwargs):
        '''Simply maps the keyword arguments to a call to `collection.find()`,
        but will try to map the data from each document to the model.

        Yields one model instance per matching document.'''
        results = collection.find(terms, **kwargs)
        for i in results:
            yield cls.map_from_result(i)
    @classmethod
    def find_one_in(cls, collection, terms=None, **kwargs):
        '''Practically identical to find_in(), but only returns a single
        object, as opposed to a list.

        Returns None when no document matches.'''
        result = collection.find_one(terms, **kwargs)
        if result is not None:
            result = cls.map_from_result(result)
        return result
    @classmethod
    def map_from_result(cls, result):
        '''Creates (and returns) an instance of this model, and does its best
        to map the fields in "result" to attributes in this class.

        Each assignment goes through __setitem__, so field type validation
        applies.'''
        model = cls()
        for field in result:
            model[field] = result[field]
        return model
|
{
"content_hash": "f8ab5fa732e0d82172790027a2f3d4ee",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 81,
"avg_line_length": 36.86,
"alnum_prop": 0.5552088985349973,
"repo_name": "nesv/pureodm",
"id": "f7e11b3f689371c8330b52f6db81ea9975b057d4",
"size": "7395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pureodm/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13766"
}
],
"symlink_target": ""
}
|
__author__ = 'sergey'
from dedupsqlfs.db.sqlite.table import Table
class TableOption(Table):
    """Key/value option storage backed by an SQLite table."""

    _table_name = "option"

    def create(self):
        """Create the option table if it does not already exist."""
        cur = self.getCursor()
        cur.execute(
            "CREATE TABLE IF NOT EXISTS `%s` ("
            "name TEXT NOT NULL PRIMARY KEY, "
            "value TEXT NULL"
            ")" % self._table_name
        )
        return

    def insert(self, name, value):
        """Insert a new option row and return its rowid."""
        self.startTimer()
        cur = self.getCursor()
        cur.execute(
            "INSERT INTO `%s`(name, value) VALUES (?, ?)" % self._table_name,
            (name, value))
        rowid = cur.lastrowid
        self.stopTimer('insert')
        return rowid

    def update(self, name, value):
        """Update an existing option.

        @return: count updated rows
        @rtype: int
        """
        self.startTimer()
        cur = self.getCursor()
        cur.execute(
            "UPDATE `%s` SET value=? WHERE name=?" % self._table_name,
            (value, name))
        updated = cur.rowcount
        self.stopTimer('update')
        return updated

    def get(self, name, raw=False):
        """Fetch one option value by name.

        Returns None when missing; otherwise the stored bytes (raw=True)
        or their decoded text (raw=False).
        """
        self.startTimer()
        cur = self.getCursor()
        cur.execute(
            "SELECT value FROM `%s` WHERE name=:name" % self._table_name,
            {"name": name})
        row = cur.fetchone()
        value = row
        if row:
            value = row["value"] if raw else row["value"].decode()
        self.stopTimer('get')
        return value

    def getAll(self):
        """Return every option as a {name: value} dict of decoded strings."""
        self.startTimer()
        cur = self.getCursor()
        cur.execute("SELECT * FROM `%s`" % self._table_name)
        options = {}
        for row in cur.fetchall():
            options[row["name"].decode()] = row["value"].decode()
        self.stopTimer('getAll')
        return options
|
{
"content_hash": "86fd34dfabcdbaa793406d87bfb81df5",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 100,
"avg_line_length": 27.37313432835821,
"alnum_prop": 0.5059978189749182,
"repo_name": "sergey-dryabzhinsky/dedupsqlfs",
"id": "af0dbd35daabd063b0999c278f0c1d7af4445ba3",
"size": "1858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dedupsqlfs/db/sqlite/table/option.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5509796"
},
{
"name": "C++",
"bytes": "33360"
},
{
"name": "Cython",
"bytes": "107356"
},
{
"name": "Python",
"bytes": "1042676"
},
{
"name": "Shell",
"bytes": "1480"
}
],
"symlink_target": ""
}
|
"""
Tests which scan for certain occurrences in the code, they may not find
all of these occurrences but should catch almost all.
"""
from __future__ import division, absolute_import, print_function
import sys
if sys.version_info >= (3, 4):
from pathlib import Path
import ast
import tokenize
import numpy
from numpy.testing import run_module_suite
from numpy.testing.decorators import slow
class ParseCall(ast.NodeVisitor):
    """Collects the dotted-name parts of an expression into ``self.ls``.

    Visiting e.g. the AST of ``warnings.warn`` leaves
    ``['warnings', 'warn']`` in ``ls``, outermost name first.
    """
    def __init__(self):
        self.ls = []

    def visit_Attribute(self, node):
        # Recurse into the value first so names accumulate left-to-right.
        self.generic_visit(node)
        self.ls.append(node.attr)

    def visit_Name(self, node):
        self.ls.append(node.id)
class FindFuncs(ast.NodeVisitor):
    """AST visitor that raises AssertionError for misused warning calls.

    Flags two patterns anywhere in a module:
    * ``simplefilter``/``filterwarnings`` called with ``"ignore"``;
    * ``warnings.warn`` (or a bare ``warn``) without a ``stacklevel``.
    """
    def __init__(self, filename):
        super().__init__()
        self.__filename = filename

    def visit_Call(self, node):
        p = ParseCall()
        p.visit(node.func)
        ast.NodeVisitor.generic_visit(self, node)

        if p.ls[-1] == 'simplefilter' or p.ls[-1] == 'filterwarnings':
            if node.args[0].s == "ignore":
                raise AssertionError(
                    "ignore filter should not be used; found in "
                    "{} on line {}".format(self.__filename, node.lineno))

        if p.ls[-1] == 'warn' and (
                len(p.ls) == 1 or p.ls[-2] == 'warnings'):
            # Bug fix: the original compared the filename against a string
            # literal with `is`, which tests object identity rather than
            # equality and was therefore always False (self.__filename is a
            # pathlib.Path here). Compare the string form by equality so the
            # scanner's own file is genuinely exempted.
            if str(self.__filename).endswith("testing/tests/test_warnings.py"):
                # This file
                return

            # See if stacklevel exists:
            if len(node.args) == 3:
                return
            args = {kw.arg for kw in node.keywords}
            if "stacklevel" in args:
                return
            raise AssertionError(
                "warnings should have an appropriate stacklevel; found in "
                "{} on line {}".format(self.__filename, node.lineno))
@slow
def test_warning_calls():
    # combined "ignore" and stacklevel error
    # Parses every .py file shipped in the installed numpy package and runs
    # FindFuncs over it; any misuse raises AssertionError and fails the
    # test. Marked slow because it walks the whole package tree.
    base = Path(numpy.__file__).parent
    for path in base.rglob("*.py"):
        # numpy.testing and a couple of generated __init__ files are exempt.
        if base / "testing" in path.parents:
            continue
        if path == base / "__init__.py":
            continue
        if path == base / "random" / "__init__.py":
            continue
        # use tokenize to auto-detect encoding on systems where no
        # default encoding is defined (e.g. LANG='C')
        with tokenize.open(str(path)) as file:
            tree = ast.parse(file.read())
            FindFuncs(path).visit(tree)
if __name__ == "__main__":
    # Allow running this module's checks directly via numpy.testing.
    run_module_suite()
|
{
"content_hash": "9a15b4f45e882ca155aa13de8d896455",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 31.674418604651162,
"alnum_prop": 0.5172540381791483,
"repo_name": "mbayon/TFG-MachineLearning",
"id": "c5818d21ce420f73c25eecdf85c3e4ee7c865e0b",
"size": "2724",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "vbig/lib/python2.7/site-packages/numpy/tests/test_warnings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "24787"
},
{
"name": "Julia",
"bytes": "11103"
},
{
"name": "Matlab",
"bytes": "98571"
},
{
"name": "Perl",
"bytes": "716"
},
{
"name": "Python",
"bytes": "115284"
},
{
"name": "Shell",
"bytes": "643"
}
],
"symlink_target": ""
}
|
from .generic import *
from .utils import string_type
from .pdf import PdfFileReader, PdfFileWriter
from .pagerange import PageRange
from sys import version_info
# Python 2/3 compatibility shim: StreamIO is an in-memory byte-stream
# class, and on Python 3 the name `file` is rebound to io.FileIO so the
# isinstance(fileobj, file) checks later in this module keep working.
if version_info < ( 3, 0 ):
    from cStringIO import StringIO
    StreamIO = StringIO
else:
    from io import BytesIO
    from io import FileIO as file
    StreamIO = BytesIO
class _MergedPage(object):
"""
_MergedPage is used internally by PdfFileMerger to collect necessary
information on each page that is being merged.
"""
def __init__(self, pagedata, src, id):
self.src = src
self.pagedata = pagedata
self.out_pagedata = None
self.id = id
class PdfFileMerger(object):
    """
    Initializes a PdfFileMerger object. PdfFileMerger merges multiple PDFs
    into a single PDF. It can concatenate, slice, insert, or any combination
    of the above.

    See the functions :meth:`merge()<merge>` (or :meth:`append()<append>`)
    and :meth:`write()<write>` for usage information.

    :param bool strict: Determines whether user should be warned of all
            problems and also causes some correctable problems to be fatal.
            Defaults to ``True``.
    """

    def __init__(self, strict=True):
        self.inputs = []        # (stream, PdfFileReader, stream_owned_by_us) triples
        self.pages = []         # _MergedPage instances, in output order
        self.output = PdfFileWriter()
        self.bookmarks = []     # (possibly nested) outline entries for the output
        self.named_dests = []   # named destinations collected from the inputs
        self.id_count = 0       # monotonically increasing per-page id
        self.strict = strict

    def merge(self, position, fileobj, bookmark=None, pages=None, import_bookmarks=True):
        """
        Merges the pages from the given file into the output file at the
        specified page number.

        :param int position: The *page number* to insert this file. File will
            be inserted after the given number.
        :param fileobj: A File Object or an object that supports the standard read
            and seek methods similar to a File Object. Could also be a
            string representing a path to a PDF file.
        :param str bookmark: Optionally, you may specify a bookmark to be applied at
            the beginning of the included file by supplying the text of the bookmark.
        :param pages: can be a :ref:`Page Range <page-range>` or a ``(start, stop[, step])`` tuple
            to merge only the specified range of pages from the source
            document into the output document.
        :param bool import_bookmarks: You may prevent the source document's bookmarks
            from being imported by specifying this as ``False``.
        """
        # This flag is passed to self.inputs.append and means that the
        # stream used was created in this method (so close() must close it).
        my_file = False

        # If the fileobj parameter is a string, assume it is a path and
        # create a file object at that location. If it is a file, copy the
        # file's contents into a BytesIO (or StreamIO) stream object; if it
        # is a PdfFileReader, copy that reader's stream into a BytesIO (or
        # StreamIO) stream. If fileobj is none of the above types, it is
        # not modified.
        if isinstance(fileobj, string_type):
            fileobj = file(fileobj, 'rb')
            my_file = True
        elif isinstance(fileobj, file):
            fileobj.seek(0)
            filecontent = fileobj.read()
            fileobj = StreamIO(filecontent)
            my_file = True
        elif isinstance(fileobj, PdfFileReader):
            orig_tell = fileobj.stream.tell()
            fileobj.stream.seek(0)
            filecontent = StreamIO(fileobj.stream.read())
            fileobj.stream.seek(orig_tell)  # reset the stream to its original location
            fileobj = filecontent
            my_file = True

        # Create a new PdfFileReader instance using the stream
        # (either file or BytesIO or StringIO) created above.
        pdfr = PdfFileReader(fileobj, strict=self.strict)

        # Find the range of pages to merge.
        if pages is None:
            pages = (0, pdfr.getNumPages())
        elif isinstance(pages, PageRange):
            pages = pages.indices(pdfr.getNumPages())
        elif not isinstance(pages, tuple):
            raise TypeError('"pages" must be a tuple of (start, stop[, step])')

        srcpages = []
        if bookmark:
            bookmark = Bookmark(TextStringObject(bookmark), NumberObject(self.id_count), NameObject('/Fit'))

        outline = []
        if import_bookmarks:
            outline = pdfr.getOutlines()
            outline = self._trim_outline(pdfr, outline, pages)

        if bookmark:
            # The imported outline becomes a nested child list of the new bookmark.
            self.bookmarks += [bookmark, outline]
        else:
            self.bookmarks += outline

        dests = pdfr.namedDestinations
        dests = self._trim_dests(pdfr, dests, pages)
        self.named_dests += dests

        # Gather all the pages that are going to be merged.
        for i in range(*pages):
            pg = pdfr.getPage(i)

            page_id = self.id_count
            self.id_count += 1

            srcpages.append(_MergedPage(pg, pdfr, page_id))

        self._associate_dests_to_pages(srcpages)
        self._associate_bookmarks_to_pages(srcpages)

        # Slice to insert the pages at the specified position.
        self.pages[position:position] = srcpages

        # Keep track of our input files so we can close them later.
        self.inputs.append((fileobj, pdfr, my_file))

    def append(self, fileobj, bookmark=None, pages=None, import_bookmarks=True):
        """
        Identical to the :meth:`merge()<merge>` method, but assumes you want to concatenate
        all pages onto the end of the file instead of specifying a position.

        :param fileobj: A File Object or an object that supports the standard read
            and seek methods similar to a File Object. Could also be a
            string representing a path to a PDF file.
        :param str bookmark: Optionally, you may specify a bookmark to be applied at
            the beginning of the included file by supplying the text of the bookmark.
        :param pages: can be a :ref:`Page Range <page-range>` or a ``(start, stop[, step])`` tuple
            to merge only the specified range of pages from the source
            document into the output document.
        :param bool import_bookmarks: You may prevent the source document's bookmarks
            from being imported by specifying this as ``False``.
        """
        self.merge(len(self.pages), fileobj, bookmark, pages, import_bookmarks)

    def write(self, fileobj):
        """
        Writes all data that has been merged to the given output file.

        :param fileobj: Output file. Can be a filename or any kind of
            file-like object.
        """
        my_file = False
        # BUG FIX: the original tested ``type(fileobj) in (str, str)`` -- a
        # duplicated entry left over from a py2->py3 conversion of
        # ``(str, unicode)``. Accept both the package's text string type and
        # the native str so all path flavors open correctly.
        if isinstance(fileobj, (string_type, str)):
            fileobj = file(fileobj, 'wb')
            my_file = True

        # Add pages to the PdfFileWriter, remembering an indirect reference
        # to each written page so bookmarks/destinations can point at it.
        for page in self.pages:
            self.output.addPage(page.pagedata)
            page.out_pagedata = self.output.getReference(self.output._pages.getObject()["/Kids"][-1].getObject())

        # Once all pages are added, create bookmarks to point at those pages.
        self._write_dests()
        self._write_bookmarks()

        # Write the output to the file.
        self.output.write(fileobj)

        if my_file:
            fileobj.close()

    def close(self):
        """
        Shuts all file descriptors (input and output) and clears all memory
        usage.
        """
        self.pages = []
        for fo, _pdfr, mine in self.inputs:
            # Only close streams this merger created itself.
            if mine:
                fo.close()

        self.inputs = []
        self.output = None

    def addMetadata(self, infos):
        """
        Add custom metadata to the output.

        :param dict infos: a Python dictionary where each key is a field
            and each value is your new metadata.
            Example: ``{u'/Title': u'My title'}``
        """
        self.output.addMetadata(infos)

    def setPageLayout(self, layout):
        """
        Set the page layout.

        :param str layout: The page layout to be used

        Valid layouts are:
             /NoLayout        Layout explicitly not specified
             /SinglePage      Show one page at a time
             /OneColumn       Show one column at a time
             /TwoColumnLeft   Show pages in two columns, odd-numbered pages on the left
             /TwoColumnRight  Show pages in two columns, odd-numbered pages on the right
             /TwoPageLeft     Show two pages at a time, odd-numbered pages on the left
             /TwoPageRight    Show two pages at a time, odd-numbered pages on the right
        """
        self.output.setPageLayout(layout)

    def setPageMode(self, mode):
        """
        Set the page mode.

        :param str mode: The page mode to use.

        Valid modes are:
            /UseNone         Do not show outlines or thumbnails panels
            /UseOutlines     Show outlines (aka bookmarks) panel
            /UseThumbs       Show page thumbnails panel
            /FullScreen      Fullscreen view
            /UseOC           Show Optional Content Group (OCG) panel
            /UseAttachments  Show attachments panel
        """
        self.output.setPageMode(mode)

    def _trim_dests(self, pdf, dests, pages):
        """
        Removes any named destinations that are not a part of the specified
        page set.
        """
        new_dests = []
        for k, o in list(dests.items()):
            for j in range(*pages):
                if pdf.getPage(j).getObject() == o['/Page'].getObject():
                    # Resolve the page indirection in place before keeping it.
                    o[NameObject('/Page')] = o['/Page'].getObject()
                    assert str(k) == str(o['/Title'])
                    new_dests.append(o)
                    break
        return new_dests

    def _trim_outline(self, pdf, outline, pages):
        """
        Removes any outline/bookmark entries that are not a part of the
        specified page set.
        """
        new_outline = []
        prev_header_added = True
        for i, o in enumerate(outline):
            if isinstance(o, list):
                # Nested list: recurse; if any child survives, make sure its
                # header (the preceding entry) is included too.
                sub = self._trim_outline(pdf, o, pages)
                if sub:
                    if not prev_header_added:
                        new_outline.append(outline[i-1])
                    new_outline.append(sub)
            else:
                prev_header_added = False
                for j in range(*pages):
                    if pdf.getPage(j).getObject() == o['/Page'].getObject():
                        o[NameObject('/Page')] = o['/Page'].getObject()
                        new_outline.append(o)
                        prev_header_added = True
                        break
        return new_outline

    def _write_dests(self):
        """
        Translates each collected named destination's page id into the
        corresponding output-page reference and writes it to the output.
        """
        for v in self.named_dests:
            pageno = None
            if '/Page' in v:
                for i, p in enumerate(self.pages):
                    if p.id == v['/Page']:
                        v[NameObject('/Page')] = p.out_pagedata
                        pageno = i
                        break

            if pageno is not None:
                self.output.addNamedDestinationObject(v)

    def _take_coord(self, bookmark, key):
        """
        Pops *key* out of the bookmark dictionary and returns its value as a
        FloatObject, defaulting to 0 when the key is absent or null.

        Replaces the original unconditional ``del bookmark[key]`` which
        raised KeyError on bookmarks missing an optional coordinate.
        """
        value = bookmark.pop(key, None)
        if value is None or isinstance(value, NullObject):
            return FloatObject(0)
        return FloatObject(value)

    def _write_bookmarks(self, bookmarks=None, parent=None):
        """
        Recursively writes the collected bookmark tree into the output,
        translating each entry's page id into a /GoTo action targeting the
        corresponding output page.
        """
        if bookmarks is None:
            bookmarks = self.bookmarks

        last_added = None
        for b in bookmarks:
            if isinstance(b, list):
                # A nested list holds the children of the previously added bookmark.
                self._write_bookmarks(b, last_added)
                continue

            pageno = None
            if '/Page' in b:
                for i, p in enumerate(self.pages):
                    if p.id == b['/Page']:
                        # Build the destination arguments for this fit type;
                        # _take_coord also strips the consumed keys from b.
                        args = [NumberObject(p.id), NameObject(b['/Type'])]
                        if b['/Type'] == '/FitH' or b['/Type'] == '/FitBH':
                            args.append(self._take_coord(b, '/Top'))
                        elif b['/Type'] == '/FitV' or b['/Type'] == '/FitBV':
                            args.append(self._take_coord(b, '/Left'))
                        elif b['/Type'] == '/XYZ':
                            args.append(self._take_coord(b, '/Left'))
                            args.append(self._take_coord(b, '/Top'))
                            args.append(self._take_coord(b, '/Zoom'))
                        elif b['/Type'] == '/FitR':
                            args.append(self._take_coord(b, '/Left'))
                            args.append(self._take_coord(b, '/Bottom'))
                            args.append(self._take_coord(b, '/Right'))
                            args.append(self._take_coord(b, '/Top'))
                        # '/Fit' and '/FitB' take no additional arguments.
                        b[NameObject('/A')] = DictionaryObject({NameObject('/S'): NameObject('/GoTo'), NameObject('/D'): ArrayObject(args)})
                        pageno = i
                        break
            if pageno is not None:
                del b['/Page'], b['/Type']
                last_added = self.output.addBookmarkDict(b, parent)

    def _associate_dests_to_pages(self, pages):
        """
        Resolves each pending named destination's page object to the id of
        the matching merged page; raises if no match is found.
        """
        for nd in self.named_dests:
            pageno = None
            np = nd['/Page']

            if isinstance(np, NumberObject):
                # Already resolved to a page id in a previous merge() call.
                continue

            for p in pages:
                if np.getObject() == p.pagedata.getObject():
                    pageno = p.id

            if pageno is not None:
                nd[NameObject('/Page')] = NumberObject(pageno)
            else:
                raise ValueError("Unresolved named destination '%s'" % (nd['/Title'],))

    def _associate_bookmarks_to_pages(self, pages, bookmarks=None):
        """
        Resolves each pending bookmark's page object to the id of the
        matching merged page (recursing into nested lists); raises if no
        match is found.
        """
        if bookmarks is None:
            bookmarks = self.bookmarks

        for b in bookmarks:
            if isinstance(b, list):
                self._associate_bookmarks_to_pages(pages, b)
                continue

            pageno = None
            bp = b['/Page']

            if isinstance(bp, NumberObject):
                continue

            for p in pages:
                if bp.getObject() == p.pagedata.getObject():
                    pageno = p.id

            if pageno is not None:
                b[NameObject('/Page')] = NumberObject(pageno)
            else:
                raise ValueError("Unresolved bookmark '%s'" % (b['/Title'],))

    def findBookmark(self, bookmark, root=None):
        """
        Depth-first search for *bookmark* (by identity or by /Title) in the
        collected bookmark tree. Returns the index path to it as a list of
        ints, or ``None`` if it is not present.
        """
        if root is None:
            root = self.bookmarks

        for i, b in enumerate(root):
            if isinstance(b, list):
                res = self.findBookmark(bookmark, b)
                if res:
                    return [i] + res
            elif b == bookmark or b['/Title'] == bookmark:
                return [i]

        return None

    def addBookmark(self, title, pagenum, parent=None):
        """
        Add a bookmark to this PDF file.

        :param str title: Title to use for this bookmark.
        :param int pagenum: Page number this bookmark will point to.
        :param parent: A reference to a parent bookmark to create nested
            bookmarks.
        """
        dest = Bookmark(TextStringObject(title), NumberObject(pagenum), NameObject('/FitH'), NumberObject(826))
        if parent is None:
            self.bookmarks.append(dest)
        else:
            # Locate the parent's index path, then descend to its list.
            if isinstance(parent, list):
                iloc = parent
            else:
                iloc = self.findBookmark(parent)
            bmparent = self.bookmarks
            for i in iloc[:-1]:
                bmparent = bmparent[i]
            npos = iloc[-1]+1
            if npos < len(bmparent) and isinstance(bmparent[npos], list):
                # The parent already has a child list; append to it.
                bmparent[npos].append(dest)
            else:
                # Start a new child list right after the parent entry.
                bmparent.insert(npos, [dest])
        return dest

    def addNamedDestination(self, title, pagenum):
        """
        Add a destination to the output.

        :param str title: Title to use
        :param int pagenum: Page number this destination points at.
        """
        dest = Destination(TextStringObject(title), NumberObject(pagenum), NameObject('/FitH'), NumberObject(826))
        self.named_dests.append(dest)
class OutlinesObject(list):
    """List-like wrapper around a PDF outline (bookmark) tree.

    Mirrors the children of the underlying ``TreeObject`` (*tree*) held by
    the owning document object (*pdf*), keeping the Python list and the
    PDF tree in sync as entries are added or removed.
    """

    def __init__(self, pdf, tree, parent=None):
        list.__init__(self)
        self.tree = tree      # the outline TreeObject this list mirrors
        self.pdf = pdf        # owning PDF document object
        self.parent = parent  # parent outline entry, if nested

    def remove(self, index):
        # NOTE: unlike list.remove, this takes an *index*, not a value.
        # Removes the entry from both the Python list and the PDF tree.
        obj = self[index]
        del self[index]
        self.tree.removeChild(obj)

    def add(self, title, pagenum):
        """Append a bookmark titled *title* pointing at page *pagenum*.

        Builds a /GoTo action with a /FitH destination for the page and
        attaches the new bookmark node to the underlying tree.
        """
        pageRef = self.pdf.getObject(self.pdf._pages)['/Kids'][pagenum]
        action = DictionaryObject()
        action.update({
            NameObject('/D') : ArrayObject([pageRef, NameObject('/FitH'), NumberObject(826)]),
            NameObject('/S') : NameObject('/GoTo')
        })
        actionRef = self.pdf._addObject(action)
        bookmark = TreeObject()

        bookmark.update({
            NameObject('/A'): actionRef,
            NameObject('/Title'): createStringObject(title),
        })

        self.pdf._addObject(bookmark)

        self.tree.addChild(bookmark)

    def removeAll(self):
        """Detach every child from the tree and empty this list."""
        for child in [x for x in self.tree.children()]:
            self.tree.removeChild(child)
            # NOTE(review): pops one list entry per tree child; this assumes
            # the list length matches the tree's child count, otherwise
            # self.pop() raises IndexError -- confirm against callers.
            self.pop()
|
{
"content_hash": "641897aeb2f3d9c15be048c36bc29a41",
"timestamp": "",
"source": "github",
"line_count": 525,
"max_line_length": 140,
"avg_line_length": 38.23619047619048,
"alnum_prop": 0.519378300288931,
"repo_name": "malexandre/python-xhtml2pdf-demo",
"id": "c8e6a6228ec655564367e63476be1c5684810f3a",
"size": "21598",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "PyPDF2/merger.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "720407"
},
{
"name": "C++",
"bytes": "2019"
},
{
"name": "CSS",
"bytes": "16419"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "4321122"
},
{
"name": "Shell",
"bytes": "4864"
}
],
"symlink_target": ""
}
|
"""Contains the core layers: Dense, Dropout.
Also contains their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
class Dense(base._Layer):  # pylint: disable=protected-access
  """Densely-connected layer class.

  This layer implements the operation:
  `outputs = activation(matmul(inputs, kernel) + bias)`
  Where `activation` is the activation function passed as the `activation`
  argument (if not `None`), `kernel` is a weights matrix created by the layer,
  and `bias` is a bias vector created by the layer
  (only if `use_bias` is `True`).

  Note: if the input to the layer has a rank greater than 2, then it is
  flattened prior to the initial matrix multiply by `kernel`.

  Arguments:
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable). Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such cases.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Properties:
    units: Python integer, dimensionality of the output space.
    activation: Activation function (callable).
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer instance (or name) for the weight matrix.
    bias_initializer: Initializer instance (or name) for the bias.
    kernel_regularizer: Regularizer instance for the weight matrix (callable)
    bias_regularizer: Regularizer instance for the bias (callable).
    activity_regularizer: Regularizer instance for the output (callable)
    kernel: Weight matrix (TensorFlow variable or tensor).
    bias: Bias vector, if applicable (TensorFlow variable or tensor).
  """

  def __init__(self, units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    super(Dense, self).__init__(trainable=trainable, name=name, **kwargs)
    self.units = units
    self.activation = activation
    self.use_bias = use_bias
    self.kernel_initializer = kernel_initializer
    self.bias_initializer = bias_initializer
    self.kernel_regularizer = kernel_regularizer
    self.bias_regularizer = bias_regularizer
    self.activity_regularizer = activity_regularizer

  def build(self, input_shape):
    # Validate the input shape: rank must be known, at least 2, and the
    # innermost (feature) dimension must be statically defined.
    input_shape = tensor_shape.TensorShape(input_shape)
    if input_shape.ndims is None:
      raise ValueError('Inputs to `Dense` should have known rank.')
    if len(input_shape) < 2:
      raise ValueError('Inputs to `Dense` should have rank >= 2.')
    if input_shape[-1].value is None:
      raise ValueError('The last dimension of the inputs to `Dense` '
                       'should be defined. Found `None`.')
    # Note that we set `trainable=True` because this is a trainable
    # weight of the layer. If the layer is not trainable
    # (self.trainable = False), the variable will not be added to
    # tf.trainable_variables(), and self.trainable_weights will be empty.
    self.kernel = vs.get_variable('kernel',
                                  shape=[input_shape[-1].value, self.units],
                                  initializer=self.kernel_initializer,
                                  regularizer=self.kernel_regularizer,
                                  dtype=self.dtype,
                                  trainable=True)
    if self.use_bias:
      self.bias = vs.get_variable('bias',
                                  shape=[self.units,],
                                  initializer=self.bias_initializer,
                                  regularizer=self.bias_regularizer,
                                  dtype=self.dtype,
                                  trainable=True)
    else:
      self.bias = None

  def call(self, inputs):
    shape = inputs.get_shape().as_list()
    output_shape = shape[:-1] + [self.units]
    if len(output_shape) > 2:
      # Broadcasting is required for the inputs: contract the last axis of
      # `inputs` against the first axis of `kernel`.
      outputs = standard_ops.tensordot(inputs, self.kernel,
                                       [[len(shape) - 1], [0]])
      # Reshape the output back to the original ndim of the input.
      outputs.set_shape(output_shape)
    else:
      outputs = standard_ops.matmul(inputs, self.kernel)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs

  def _compute_output_shape(self, input_shape):
    # Output shape is the input shape with the innermost dimension
    # replaced by `units`.
    input_shape = tensor_shape.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    if input_shape[-1].value is None:
      raise ValueError(
          'The innermost dimension of input_shape must be defined, but saw: %s'
          % input_shape)
    return input_shape[:-1].concatenate(self.units)
def dense(
    inputs, units,
    activation=None,
    use_bias=True,
    kernel_initializer=None,
    bias_initializer=init_ops.zeros_initializer(),
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    trainable=True,
    name=None,
    reuse=None):
  """Functional interface for the densely-connected layer.

  This layer implements the operation:
  `outputs = activation(matmul(inputs, kernel) + bias)`
  Where `activation` is the activation function passed as the `activation`
  argument (if not `None`), `kernel` is a weights matrix created by the layer,
  and `bias` is a bias vector created by the layer
  (only if `use_bias` is `True`).

  Note: if the `inputs` tensor has a rank greater than 2, then it is
  flattened prior to the initial matrix multiply by `kernel`.

  Arguments:
    inputs: Tensor input.
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable). Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Returns:
    Output tensor.
  """
  # Construct the layer object and apply it immediately; `_scope`/`_reuse`
  # tie the variables to `name` so repeated calls can share weights.
  layer = Dense(units,
                activation=activation,
                use_bias=use_bias,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer,
                kernel_regularizer=kernel_regularizer,
                bias_regularizer=bias_regularizer,
                activity_regularizer=activity_regularizer,
                trainable=trainable,
                name=name,
                dtype=inputs.dtype.base_dtype,
                _scope=name,
                _reuse=reuse)
  return layer.apply(inputs)
class Dropout(base._Layer):  # pylint: disable=protected-access
  """Applies Dropout to the input.

  During training, each input unit is zeroed out with probability `rate`;
  the surviving units are rescaled by `1 / (1 - rate)` so that their
  expected sum matches the inference-time sum.

  Arguments:
    rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
      10% of input units.
    noise_shape: 1D tensor of type `int32` representing the shape of the
      binary dropout mask that will be multiplied with the input.
      For instance, if your inputs have shape
      `(batch_size, timesteps, features)`, and you want the dropout mask
      to be the same for all timesteps, you can use
      `noise_shape=[batch_size, 1, features]`.
    seed: A Python integer. Used to create random seeds. See
      @{tf.set_random_seed} for behavior.
    name: The name of the layer (string).
  """

  def __init__(self, rate=0.5,
               noise_shape=None,
               seed=None,
               name=None,
               **kwargs):
    super(Dropout, self).__init__(name=name, **kwargs)
    self.rate = rate
    self.noise_shape = noise_shape
    self.seed = seed

  def call(self, inputs, training=False):
    # Select between the dropout branch and an identity pass-through,
    # depending on the (possibly tensor-valued) `training` flag.
    apply_dropout = lambda: nn.dropout(inputs, 1 - self.rate,
                                       noise_shape=self.noise_shape,
                                       seed=self.seed)
    passthrough = lambda: array_ops.identity(inputs)
    return utils.smart_cond(training, apply_dropout, passthrough)
def dropout(inputs,
            rate=0.5,
            noise_shape=None,
            seed=None,
            training=False,
            name=None):
  """Functional interface for the Dropout layer.

  During training, each input unit is zeroed out with probability `rate`;
  the surviving units are rescaled by `1 / (1 - rate)` so that their
  expected sum matches the inference-time sum.

  Arguments:
    inputs: Tensor input.
    rate: The dropout rate, between 0 and 1. E.g. "rate=0.1" would drop out
      10% of input units.
    noise_shape: 1D tensor of type `int32` representing the shape of the
      binary dropout mask that will be multiplied with the input.
      For instance, if your inputs have shape
      `(batch_size, timesteps, features)`, and you want the dropout mask
      to be the same for all timesteps, you can use
      `noise_shape=[batch_size, 1, features]`.
    seed: A Python integer. Used to create random seeds. See
      @{tf.set_random_seed} for behavior.
    training: Either a Python boolean, or a TensorFlow boolean scalar tensor
      (e.g. a placeholder). Whether to return the output in training mode
      (apply dropout) or in inference mode (return the input untouched).
    name: The name of the layer (string).

  Returns:
    Output tensor.
  """
  # Build the layer object and apply it in a single expression.
  return Dropout(rate, noise_shape=noise_shape, seed=seed,
                 name=name).apply(inputs, training=training)
# Aliases kept for users expecting the "fully connected" naming convention.
FullyConnected = Dense
fully_connected = dense
|
{
"content_hash": "e82100da0a60bc63aac3c70365b1d514",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 79,
"avg_line_length": 40.325259515570934,
"alnum_prop": 0.6614896172987815,
"repo_name": "tntnatbry/tensorflow",
"id": "b16012a69d03f57fa02f30b04cd42c655d12693f",
"size": "12394",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/layers/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7583"
},
{
"name": "C",
"bytes": "174703"
},
{
"name": "C++",
"bytes": "21511895"
},
{
"name": "CMake",
"bytes": "122876"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "786880"
},
{
"name": "HTML",
"bytes": "557007"
},
{
"name": "Java",
"bytes": "277432"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833840"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "36990"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64714"
},
{
"name": "Protocol Buffer",
"bytes": "197812"
},
{
"name": "Python",
"bytes": "17846923"
},
{
"name": "Shell",
"bytes": "319915"
},
{
"name": "TypeScript",
"bytes": "775401"
}
],
"symlink_target": ""
}
|
from stream_framework.tests.storage.base import TestBaseActivityStorageStorage
from stream_framework.storage.redis.activity_storage import RedisActivityStorage
class RedisActivityStorageTest(TestBaseActivityStorageStorage):
    """Runs the shared activity-storage test suite against the Redis backend."""

    # Backend under test; the base class instantiates and exercises it.
    storage_cls = RedisActivityStorage
|
{
"content_hash": "facbe64d733705a2d4fb18e8bc87ed04",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 80,
"avg_line_length": 44.166666666666664,
"alnum_prop": 0.8754716981132076,
"repo_name": "SergioChan/Stream-Framework",
"id": "b2293d481e4890e53c962302636622f2074fecb5",
"size": "265",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "stream_framework/tests/storage/redis/activity_storage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "86131"
},
{
"name": "Nginx",
"bytes": "1796"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Puppet",
"bytes": "76558"
},
{
"name": "Python",
"bytes": "542365"
},
{
"name": "Ruby",
"bytes": "259164"
},
{
"name": "Shell",
"bytes": "8427"
}
],
"symlink_target": ""
}
|
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._compute_policies_operations import build_create_or_update_request, build_delete_request, build_get_request, build_list_by_account_request, build_update_request
T = TypeVar('T')
# Shape of the optional ``cls`` response hook accepted by each operation:
# it receives the raw pipeline response, the deserialized model, and the
# response-headers dict, and its return value replaces the default result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ComputePoliciesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.datalake.analytics.account.aio.DataLakeAnalyticsAccountManagementClient`'s
:attr:`compute_policies` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list_by_account(
        self,
        resource_group_name: str,
        account_name: str,
        **kwargs: Any
    ) -> AsyncIterable[_models.ComputePolicyListResult]:
        """Lists the Data Lake Analytics compute policies within the specified Data Lake Analytics
        account. An account supports, at most, 50 policies.

        :param resource_group_name: The name of the Azure resource group.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Analytics account.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ComputePolicyListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.datalake.analytics.account.models.ComputePolicyListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-11-01-preview"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.ComputePolicyListResult]

        # Map auth/not-found/conflict status codes onto typed exceptions;
        # callers may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        def prepare_request(next_link=None):
            # First page: build the canonical list-by-account URL.
            # Subsequent pages: reuse the service-provided next_link as the
            # template and force GET, per the paging protocol.
            if not next_link:

                request = build_list_by_account_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    api_version=api_version,
                    template_url=self.list_by_account.metadata['url'],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:

                request = build_list_by_account_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    api_version=api_version,
                    template_url=next_link,
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Pull the current page's items and the link to the next page.
            deserialized = self._deserialize("ComputePolicyListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page, translating error status codes via error_map.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response


        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_account.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/computePolicies"}  # type: ignore
    @distributed_trace_async
    async def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        compute_policy_name: str,
        parameters: _models.CreateOrUpdateComputePolicyParameters,
        **kwargs: Any
    ) -> _models.ComputePolicy:
        """Creates or updates the specified compute policy. During update, the compute policy with the
        specified name will be replaced with this new compute policy. An account supports, at most, 50
        policies.

        :param resource_group_name: The name of the Azure resource group.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Analytics account.
        :type account_name: str
        :param compute_policy_name: The name of the compute policy to create or update.
        :type compute_policy_name: str
        :param parameters: Parameters supplied to create or update the compute policy. The max degree
         of parallelism per job property, min priority per job property, or both must be present.
        :type parameters:
         ~azure.mgmt.datalake.analytics.account.models.CreateOrUpdateComputePolicyParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ComputePolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.datalake.analytics.account.models.ComputePolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Map auth/not-found/conflict status codes onto typed exceptions;
        # callers may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-11-01-preview"))  # type: str
        content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.ComputePolicy]

        # Serialize the request body from the typed model.
        _json = self._serialize.body(parameters, 'CreateOrUpdateComputePolicyParameters')

        request = build_create_or_update_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            account_name=account_name,
            compute_policy_name=compute_policy_name,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self.create_or_update.metadata['url'],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ComputePolicy', pipeline_response)

        if cls:
            # Hand the raw response, model, and headers to the caller's hook.
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/computePolicies/{computePolicyName}"}  # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
account_name: str,
compute_policy_name: str,
**kwargs: Any
) -> _models.ComputePolicy:
"""Gets the specified Data Lake Analytics compute policy.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Analytics account.
:type account_name: str
:param compute_policy_name: The name of the compute policy to retrieve.
:type compute_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ComputePolicy, or the result of cls(response)
:rtype: ~azure.mgmt.datalake.analytics.account.models.ComputePolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-11-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.ComputePolicy]
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
compute_policy_name=compute_policy_name,
api_version=api_version,
template_url=self.get.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ComputePolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/computePolicies/{computePolicyName}"} # type: ignore
    @distributed_trace_async
    async def update(
        self,
        resource_group_name: str,
        account_name: str,
        compute_policy_name: str,
        parameters: Optional[_models.UpdateComputePolicyParameters] = None,
        **kwargs: Any
    ) -> _models.ComputePolicy:
        """Updates the specified compute policy.

        :param resource_group_name: The name of the Azure resource group.
        :type resource_group_name: str
        :param account_name: The name of the Data Lake Analytics account.
        :type account_name: str
        :param compute_policy_name: The name of the compute policy to update.
        :type compute_policy_name: str
        :param parameters: Parameters supplied to update the compute policy. Default value is None.
        :type parameters: ~azure.mgmt.datalake.analytics.account.models.UpdateComputePolicyParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ComputePolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.datalake.analytics.account.models.ComputePolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Map common failure statuses to typed exceptions; callers can extend
        # or override the mapping via the 'error_map' keyword argument.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-11-01-preview"))  # type: str
        content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.ComputePolicy]

        # The request body is optional for this operation; serialize it only
        # when the caller supplied parameters.
        if parameters is not None:
            _json = self._serialize.body(parameters, 'UpdateComputePolicyParameters')
        else:
            _json = None

        # Build the request from the operation metadata and normalize it for
        # the shared pipeline.
        request = build_update_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            account_name=account_name,
            compute_policy_name=compute_policy_name,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            template_url=self.update.metadata['url'],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response

        # Anything other than 200 is surfaced as an HttpResponseError carrying
        # the (best-effort deserialized) service error payload.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ComputePolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/computePolicies/{computePolicyName}"}  # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
compute_policy_name: str,
**kwargs: Any
) -> None:
"""Deletes the specified compute policy from the specified Data Lake Analytics account.
:param resource_group_name: The name of the Azure resource group.
:type resource_group_name: str
:param account_name: The name of the Data Lake Analytics account.
:type account_name: str
:param compute_policy_name: The name of the compute policy to delete.
:type compute_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2019-11-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
account_name=account_name,
compute_policy_name=compute_policy_name,
api_version=api_version,
template_url=self.delete.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataLakeAnalytics/accounts/{accountName}/computePolicies/{computePolicyName}"} # type: ignore
|
{
"content_hash": "d7965364f60d643e0dbba4d4fea44bc7",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 222,
"avg_line_length": 45.76555023923445,
"alnum_prop": 0.6454260324098275,
"repo_name": "Azure/azure-sdk-for-python",
"id": "5fd3e3ac4c0562185386ed6ea5978dabe5676f9c",
"size": "19630",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/datalake/azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/account/aio/operations/_compute_policies_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
class GroupsV3TestJSON(base.BaseIdentityV3AdminTest):
    """Smoke tests for the Identity v3 admin groups API."""

    @test.attr(type='smoke')
    def test_group_create_update_get(self):
        # Create a group and check the fields we set are echoed back.
        name = data_utils.rand_name('Group')
        description = data_utils.rand_name('Description')
        group = self.client.create_group(name,
                                         description=description)
        self.addCleanup(self.client.delete_group, group['id'])
        self.assertEqual(group['name'], name)
        self.assertEqual(group['description'], description)

        # Update both fields and check the update response.
        new_name = data_utils.rand_name('UpdateGroup')
        new_desc = data_utils.rand_name('UpdateDescription')
        updated_group = self.client.update_group(
            group['id'], name=new_name, description=new_desc)
        self.assertEqual(updated_group['name'], new_name)
        self.assertEqual(updated_group['description'], new_desc)

        # A fresh GET must agree with the update.
        fetched = self.client.get_group(group['id'])
        self.assertEqual(group['id'], fetched['id'])
        self.assertEqual(new_name, fetched['name'])
        self.assertEqual(new_desc, fetched['description'])

    @test.attr(type='smoke')
    def test_group_users_add_list_delete(self):
        group = self.client.create_group(data_utils.rand_name('Group'))
        self.addCleanup(self.client.delete_group, group['id'])

        # add user into group
        members = []
        for _ in range(3):
            user = self.client.create_user(data_utils.rand_name('User'))
            members.append(user)
            self.addCleanup(self.client.delete_user, user['id'])
            self.client.add_group_user(group['id'], user['id'])

        # list users in group
        group_users = self.client.list_group_users(group['id'])
        self.assertEqual(sorted(members), sorted(group_users))

        # delete user in group
        for user in members:
            self.client.delete_group_user(group['id'], user['id'])
        self.assertEqual(len(self.client.list_group_users(group['id'])), 0)

    @test.attr(type='smoke')
    def test_list_user_groups(self):
        # create a user
        user = self.client.create_user(
            data_utils.rand_name('User-'),
            password=data_utils.rand_name('Pass-'))
        self.addCleanup(self.client.delete_user, user['id'])

        # create two groups, and add user into them
        created_groups = []
        for _ in range(2):
            group = self.client.create_group(data_utils.rand_name('Group-'))
            created_groups.append(group)
            self.addCleanup(self.client.delete_group, group['id'])
            self.client.add_group_user(group['id'], user['id'])

        # list groups which user belongs to
        user_groups = self.client.list_user_groups(user['id'])
        self.assertEqual(sorted(created_groups), sorted(user_groups))
        self.assertEqual(2, len(user_groups))

    @test.attr(type='smoke')
    def test_list_groups(self):
        # Create a few groups, then check they all appear in the listing.
        group_ids = []
        for _ in range(3):
            group = self.client.create_group(
                data_utils.rand_name('Group'),
                description=data_utils.rand_name('Description'))
            self.addCleanup(self.client.delete_group, group['id'])
            group_ids.append(group['id'])
        fetched_ids = [g['id'] for g in self.client.list_groups()]
        missing_groups = [g for g in group_ids if g not in fetched_ids]
        self.assertEqual([], missing_groups)
|
{
"content_hash": "ff5e8b554a684736e84a63cd2bda1e52",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 71,
"avg_line_length": 42.608695652173914,
"alnum_prop": 0.5793367346938776,
"repo_name": "Vaidyanath/tempest",
"id": "d4a83e29b7e7ae5d2e83cf822580b6b925af7ca8",
"size": "4545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/identity/admin/v3/test_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "695"
},
{
"name": "Python",
"bytes": "2788179"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
}
|
"""
Implement transformation on Numba IR
"""
from collections import namedtuple, defaultdict
import logging
from numba.core.analysis import compute_cfg_from_blocks, find_top_level_loops
from numba.core import errors, ir, ir_utils
from numba.core.analysis import compute_use_defs
_logger = logging.getLogger(__name__)
def _extract_loop_lifting_candidates(cfg, blocks):
    """
    Returns a list of loops that are candidate for loop lifting

    A candidate is a top-level loop that (a) funnels all its exits to a single
    successor block, (b) has exactly one entry, (c) contains no ``yield``, and
    (d) does not start at the function entry block.
    """
    # check well-formed-ness of the loop
    def same_exit_point(loop):
        "all exits must point to the same location"
        outedges = set()
        for k in loop.exits:
            succs = set(x for x, _ in cfg.successors(k))
            if not succs:
                # If the exit point has no successor, it contains an return
                # statement, which is not handled by the looplifting code.
                # Thus, this loop is not a candidate.
                _logger.debug("return-statement in loop.")
                return False
            outedges |= succs
        ok = len(outedges) == 1
        _logger.debug("same_exit_point=%s (%s)", ok, outedges)
        return ok

    def one_entry(loop):
        "there is one entry"
        ok = len(loop.entries) == 1
        _logger.debug("one_entry=%s", ok)
        return ok

    def cannot_yield(loop):
        "cannot have yield inside the loop"
        # Scan every block touching the loop (body, entries and exits) for an
        # assignment of a Yield expression.
        insiders = set(loop.body) | set(loop.entries) | set(loop.exits)
        for blk in map(blocks.__getitem__, insiders):
            for inst in blk.body:
                if isinstance(inst, ir.Assign):
                    if isinstance(inst.value, ir.Yield):
                        _logger.debug("has yield")
                        return False
        _logger.debug("no yield")
        return True

    _logger.info('finding looplift candidates')
    # the check for cfg.entry_point in the loop.entries is to prevent a bad
    # rewrite where a prelude for a lifted loop would get written into block -1
    # if a loop entry were in block 0
    candidates = []
    for loop in find_top_level_loops(cfg):
        _logger.debug("top-level loop: %s", loop)
        if (same_exit_point(loop) and one_entry(loop) and cannot_yield(loop) and
                cfg.entry_point() not in loop.entries):
            candidates.append(loop)
            _logger.debug("add candidate: %s", loop)
    return candidates
def find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids):
    """Find input and output variables to a block region.

    Inputs are the variables live on entry at *callfrom* that the region
    actually uses or defines; outputs are the variables live at *returnto*
    that the region defines.  Both lists are sorted for stable ordering.
    """
    live_in = livemap[callfrom]
    live_out = livemap[returnto]

    # ensure live variables are actually used in the blocks, else remove,
    # saves having to create something valid to run through postproc
    # to achieve similar
    region_blocks = {label: blocks[label] for label in body_block_ids}
    usedefs = compute_use_defs(region_blocks)
    used_vars = set().union(*usedefs.usemap.values())
    def_vars = set().union(*usedefs.defmap.values())
    used_or_defined = used_vars | def_vars

    # note: sorted for stable ordering
    inputs = sorted(set(live_in) & used_or_defined)
    outputs = sorted(set(live_out) & used_or_defined & def_vars)
    return inputs, outputs
# Record describing one liftable loop: the loop object itself, its live-in and
# live-out variable names, the block that calls into the lifted loop, and the
# block control returns to afterwards.
_loop_lift_info = namedtuple('loop_lift_info',
                             'loop,inputs,outputs,callfrom,returnto')
def _loop_lift_get_candidate_infos(cfg, blocks, livemap):
    """
    Returns information on looplifting candidates.

    Returns a list of `_loop_lift_info` namedtuples, one per liftable loop,
    recording the loop, its live-in/live-out variables, the block that calls
    into the loop, and the block control returns to.
    """
    loops = _extract_loop_lifting_candidates(cfg, blocks)
    loopinfos = []
    for loop in loops:
        [callfrom] = loop.entries  # requirement checked earlier
        an_exit = next(iter(loop.exits))  # anyone of the exit block
        if len(loop.exits) > 1:
            # Pre-Py3.8 may have multiple exits
            [(returnto, _)] = cfg.successors(an_exit)  # requirement checked earlier
        else:
            # Post-Py3.8 DO NOT have multiple exits
            returnto = an_exit

        local_block_ids = set(loop.body) | set(loop.entries)
        inputs, outputs = find_region_inout_vars(
            blocks=blocks,
            livemap=livemap,
            callfrom=callfrom,
            returnto=returnto,
            body_block_ids=local_block_ids,
        )

        lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs,
                              callfrom=callfrom, returnto=returnto)
        loopinfos.append(lli)

    return loopinfos
def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto):
    """
    Transform calling block from top-level function to call the lifted loop.

    Returns a fresh block (same scope and location as *block*) whose body is
    a call to *liftedloop* followed by a jump to *returnto*.
    """
    newblock = ir.Block(scope=block.scope, loc=block.loc)
    ir_utils.fill_block_with_call(
        newblock=newblock,
        callee=liftedloop,
        label_next=returnto,
        inputs=inputs,
        outputs=outputs,
    )
    return newblock
def _loop_lift_prepare_loop_func(loopinfo, blocks):
    """
    Inplace transform loop blocks for use as lifted loop.

    Installs a prologue block (unpacking the loop inputs) before the loop
    entry and replaces the return block with an epilogue packing the outputs.
    """
    entry = blocks[loopinfo.callfrom]

    def fresh_block():
        return ir.Block(scope=entry.scope, loc=entry.loc)

    # Lowering assumes the first block to be the one with the smallest offset
    prologue_label = min(blocks) - 1
    blocks[prologue_label] = ir_utils.fill_callee_prologue(
        block=fresh_block(),
        inputs=loopinfo.inputs,
        label_next=loopinfo.callfrom,
    )
    blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue(
        block=fresh_block(),
        outputs=loopinfo.outputs,
    )
def _loop_lift_modify_blocks(func_ir, loopinfo, blocks,
                             typingctx, targetctx, flags, locals):
    """
    Modify the block inplace to call to the lifted-loop.
    Returns a dictionary of blocks of the lifted-loop.

    The loop's blocks are copied out of *blocks* into a new derived IR that
    becomes a `LiftedLoop` dispatcher; the original blocks are removed and
    the call site is replaced with a block that calls the lifted loop.
    """
    from numba.core.dispatcher import LiftedLoop

    # Copy loop blocks
    loop = loopinfo.loop
    loopblockkeys = set(loop.body) | set(loop.entries)
    if len(loop.exits) > 1:
        # Pre-Py3.8 may have multiple exits
        loopblockkeys |= loop.exits
    loopblocks = dict((k, blocks[k].copy()) for k in loopblockkeys)
    # Modify the loop blocks
    _loop_lift_prepare_loop_func(loopinfo, loopblocks)

    # Create a new IR for the lifted loop
    lifted_ir = func_ir.derive(blocks=loopblocks,
                               arg_names=tuple(loopinfo.inputs),
                               arg_count=len(loopinfo.inputs),
                               force_non_generator=True)
    liftedloop = LiftedLoop(lifted_ir,
                            typingctx, targetctx, flags, locals)

    # modify for calling into liftedloop
    callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom],
                                             loopinfo.inputs, loopinfo.outputs,
                                             loopinfo.returnto)
    # remove blocks
    for k in loopblockkeys:
        del blocks[k]
    # update main interpreter callsite into the liftedloop
    blocks[loopinfo.callfrom] = callblock
    return liftedloop
def loop_lifting(func_ir, typingctx, targetctx, flags, locals):
    """
    Loop lifting transformation.

    Given a interpreter `func_ir` returns a 2 tuple of
    `(toplevel_interp, [loop0_interp, loop1_interp, ....])`
    """
    blocks = func_ir.blocks.copy()
    cfg = compute_cfg_from_blocks(blocks)
    loopinfos = _loop_lift_get_candidate_infos(cfg, blocks,
                                               func_ir.variable_lifetime.livemap)
    loops = []
    if loopinfos:
        _logger.debug('loop lifting this IR with %d candidates:\n%s',
                      len(loopinfos), func_ir.dump_to_string())
    # Excise each candidate loop from `blocks`, replacing it with a block
    # that calls the lifted loop.
    for loopinfo in loopinfos:
        lifted = _loop_lift_modify_blocks(func_ir, loopinfo, blocks,
                                          typingctx, targetctx, flags, locals)
        loops.append(lifted)

    # Make main IR
    main = func_ir.derive(blocks=blocks)

    return main, loops
def canonicalize_cfg_single_backedge(blocks):
    """
    Rewrite loops that have multiple backedges.

    Each such loop gets a fresh "tail" block: every former backedge is
    redirected to the tail, and the tail carries the single backedge to the
    loop header.  Returns a new block dict; *blocks* is not mutated.
    """
    cfg = compute_cfg_from_blocks(blocks)
    newblocks = blocks.copy()

    def new_block_id():
        return max(newblocks.keys()) + 1

    def has_multiple_backedges(loop):
        # Count terminators inside the loop body that target the header.
        count = 0
        for k in loop.body:
            blk = blocks[k]
            edges = blk.terminator.get_targets()
            # is a backedge?
            if loop.header in edges:
                count += 1
                if count > 1:
                    # early exit
                    return True
        return False

    def yield_loops_with_multiple_backedges():
        for lp in cfg.loops().values():
            if has_multiple_backedges(lp):
                yield lp

    def replace_target(term, src, dst):
        # Return a terminator equal to *term* but with jump target *src*
        # replaced by *dst*.
        def replace(target):
            return (dst if target == src else target)

        if isinstance(term, ir.Branch):
            return ir.Branch(cond=term.cond,
                             truebr=replace(term.truebr),
                             falsebr=replace(term.falsebr),
                             loc=term.loc)
        elif isinstance(term, ir.Jump):
            return ir.Jump(target=replace(term.target), loc=term.loc)
        else:
            assert not term.get_targets()
            return term

    def rewrite_single_backedge(loop):
        """
        Add new tail block that gathers all the backedges
        """
        header = loop.header
        tailkey = new_block_id()
        for blkkey in loop.body:
            blk = newblocks[blkkey]
            if header in blk.terminator.get_targets():
                newblk = blk.copy()
                # rewrite backedge into jumps to new tail block
                newblk.body[-1] = replace_target(blk.terminator, header,
                                                 tailkey)
                newblocks[blkkey] = newblk
        # create new tail block
        entryblk = newblocks[header]
        tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc)
        # add backedge
        tailblk.append(ir.Jump(target=header, loc=tailblk.loc))
        newblocks[tailkey] = tailblk

    for loop in yield_loops_with_multiple_backedges():
        rewrite_single_backedge(loop)

    return newblocks
def canonicalize_cfg(blocks):
    """
    Rewrite the given blocks to canonicalize the CFG.
    Returns a new dictionary of blocks.

    Currently the only canonicalization applied is rewriting loops with
    multiple backedges into loops with a single backedge.
    """
    return canonicalize_cfg_single_backedge(blocks)
def with_lifting(func_ir, typingctx, targetctx, flags, locals):
    """With-lifting transformation

    Rewrite the IR to extract all withs.
    Only the top-level withs are extracted.
    Returns the (the_new_ir, the_lifted_with_ir)
    """
    from numba.core import postproc

    def dispatcher_factory(func_ir, objectmode=False, **kwargs):
        # Build the dispatcher for one lifted with-region; object-mode
        # regions get flags that force pyobject compilation.
        from numba.core.dispatcher import LiftedWith, ObjModeLiftedWith

        myflags = flags.copy()
        if objectmode:
            # Lifted with-block cannot looplift
            myflags.enable_looplift = False
            # Lifted with-block uses object mode
            myflags.enable_pyobject = True
            myflags.force_pyobject = True
            myflags.no_cpython_wrapper = False
            cls = ObjModeLiftedWith
        else:
            cls = LiftedWith
        return cls(func_ir, typingctx, targetctx, myflags, locals, **kwargs)

    postproc.PostProcessor(func_ir).run()  # ensure we have variable lifetime
    assert func_ir.variable_lifetime
    vlt = func_ir.variable_lifetime
    blocks = func_ir.blocks.copy()
    # find where with-contexts regions are
    withs = find_setupwiths(blocks)
    cfg = vlt.cfg
    _legalize_withs_cfg(withs, cfg, blocks)
    # For each with-regions, mutate them according to
    # the kind of contextmanager
    sub_irs = []
    for (blk_start, blk_end) in withs:
        body_blocks = []
        for node in _cfg_nodes_in_region(cfg, blk_start, blk_end):
            body_blocks.append(node)

        _legalize_with_head(blocks[blk_start])
        # Find the contextmanager
        cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start)
        # Mutate the body and get new IR
        sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end,
                                      body_blocks, dispatcher_factory,
                                      extra)
        sub_irs.append(sub)
    if not sub_irs:
        # Unchanged
        new_ir = func_ir
    else:
        new_ir = func_ir.derive(blocks)
    return new_ir, sub_irs
def _get_with_contextmanager(func_ir, blocks, blk_start):
    """Get the global object used for the context manager

    Returns a ``(ctxobj, extra)`` pair where *extra* carries the call
    arguments when the context manager is used as a call, else ``None``.
    Raises ``errors.CompilerError`` for illegal or unsupported usage.
    """
    _illegal_cm_msg = "Illegal use of context-manager."

    def get_var_dfn(var):
        """Get the definition given a variable"""
        return func_ir.get_definition(var)

    def get_ctxmgr_obj(var_ref):
        """Return the context-manager object and extra info.

        The extra contains the arguments if the context-manager is used
        as a call.
        """
        # If the contextmanager used as a Call
        dfn = func_ir.get_definition(var_ref)
        if isinstance(dfn, ir.Expr) and dfn.op == 'call':
            args = [get_var_dfn(x) for x in dfn.args]
            kws = {k: get_var_dfn(v) for k, v in dfn.kws}
            extra = {'args': args, 'kwargs': kws}
            var_ref = dfn.func
        else:
            extra = None

        ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref)

        # check the contextmanager object
        if ctxobj is ir.UNDEFINED:
            raise errors.CompilerError(
                "Undefined variable used as context manager",
                loc=blocks[blk_start].loc,
            )

        if ctxobj is None:
            raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc)

        return ctxobj, extra

    # Scan the start of the with-region for the contextmanager
    for stmt in blocks[blk_start].body:
        if isinstance(stmt, ir.EnterWith):
            var_ref = stmt.contextmanager
            ctxobj, extra = get_ctxmgr_obj(var_ref)
            # The object must implement numba's lifting hook.
            if not hasattr(ctxobj, 'mutate_with_body'):
                raise errors.CompilerError(
                    "Unsupported context manager in use",
                    loc=blocks[blk_start].loc,
                )
            return ctxobj, extra
    # No contextmanager found?
    raise errors.CompilerError(
        "malformed with-context usage",
        loc=blocks[blk_start].loc,
    )
def _legalize_with_head(blk):
    """Given *blk*, the head block of the with-context, check that it doesn't
    do anything else.

    The head block may contain exactly one ENTER_WITH, exactly one JUMP, and
    any number of DELs; anything else raises ``errors.CompilerError``.
    """
    counters = defaultdict(int)
    for stmt in blk.body:
        counters[type(stmt)] += 1

    # Use an explicit default of 0: `pop` does not invoke a defaultdict's
    # factory, so without it a head block *missing* the ENTER_WITH or JUMP
    # would raise an internal KeyError instead of the intended CompilerError.
    if counters.pop(ir.EnterWith, 0) != 1:
        raise errors.CompilerError(
            "with's head-block must have exactly 1 ENTER_WITH",
            loc=blk.loc,
        )
    if counters.pop(ir.Jump, 0) != 1:
        raise errors.CompilerError(
            "with's head-block must have exactly 1 JUMP",
            loc=blk.loc,
        )
    # Can have any number of del
    counters.pop(ir.Del, None)
    # There MUST NOT be any other statements
    if counters:
        raise errors.CompilerError(
            "illegal statements in with's head-block",
            loc=blk.loc,
        )
def _cfg_nodes_in_region(cfg, region_begin, region_end):
"""Find the set of CFG nodes that are in the given region
"""
region_nodes = set()
stack = [region_begin]
while stack:
tos = stack.pop()
succs, _ = zip(*cfg.successors(tos))
nodes = set([node for node in succs
if node not in region_nodes and
node != region_end])
stack.extend(nodes)
region_nodes |= nodes
return region_nodes
def _legalize_withs_cfg(withs, cfg, blocks):
"""Verify the CFG of the with-context(s).
"""
doms = cfg.dominators()
postdoms = cfg.post_dominators()
# Verify that the with-context has no side-exits
for s, e in withs:
loc = blocks[s].loc
if s not in doms[e]:
# Not sure what condition can trigger this error.
msg = "Entry of with-context not dominating the exit."
raise errors.CompilerError(msg, loc=loc)
if e not in postdoms[s]:
msg = (
"Does not support with-context that contain branches "
"(i.e. break/return/raise) that can leave the with-context. "
"Details: exit of with-context not post-dominating the entry. "
)
raise errors.CompilerError(msg, loc=loc)
def find_setupwiths(blocks):
    """Find all top-level with.

    Returns a list of ``(begin, end)`` ranges for the with-regions, sorted
    by begin offset; with-blocks nested inside an already-recorded region
    are skipped.
    """
    def find_ranges(blocks):
        # Yield the (begin, end) offsets of every ENTER_WITH in the IR.
        for blk in blocks.values():
            for ew in blk.find_insts(ir.EnterWith):
                yield ew.begin, ew.end

    def previously_occurred(start, known_ranges):
        # True if *start* falls inside an already-recorded region, i.e. this
        # with-block is nested and therefore not top-level.
        # Fixed: the body previously ignored the `start` parameter and read
        # the enclosing loop variable `s` through its closure, which only
        # worked because the call site happened to pass a variable named `s`.
        for a, b in known_ranges:
            if a <= start < b:
                return True
        return False

    known_ranges = []
    for s, e in sorted(find_ranges(blocks)):
        if not previously_occurred(s, known_ranges):
            if e not in blocks:
                # this's possible if there's an exit path in the with-block
                raise errors.CompilerError(
                    'unsupported controlflow due to return/raise '
                    'statements inside with block'
                    )
            assert s in blocks, 'starting offset is not a label'
            known_ranges.append((s, e))

    return known_ranges
|
{
"content_hash": "9944362e1e8e5cead40e2418e8617f2a",
"timestamp": "",
"source": "github",
"line_count": 526,
"max_line_length": 84,
"avg_line_length": 33.659695817490494,
"alnum_prop": 0.5885343123411466,
"repo_name": "sklam/numba",
"id": "d01747884ac309144274d8b49596faf53eb2ff35",
"size": "17705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/core/transforms.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6783"
},
{
"name": "C",
"bytes": "638283"
},
{
"name": "C++",
"bytes": "52741"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "7918676"
},
{
"name": "Shell",
"bytes": "7823"
}
],
"symlink_target": ""
}
|
"""Give names to parts of a numpy array."""
from typing import Iterable, List, Mapping, MutableMapping, Tuple, Union
import numpy as np
def lengths_to_ranges(
    lengths: Mapping[str, int]) -> MutableMapping[str, List[int]]:
  """Map each key to its run of consecutive indices.

  Eg: {a: 2, b: 3} -> {a: [0, 1], b: [2, 3, 4]} .
  """
  ranges: MutableMapping[str, List[int]] = {}
  next_index = 0
  for name, count in lengths.items():
    stop = next_index + count
    ranges[name] = list(range(next_index, stop))
    next_index = stop
  return ranges
class NamedRanges:
  """Given a map of {key: count}, give various views into it."""

  def __init__(self, counts: Mapping[str, int]):
    # Precompute each key's index range and the total flattened length.
    self._ranges = lengths_to_ranges(counts)
    self._size = sum(counts.values())

  def __getitem__(self, name) -> List[int]:
    return self._ranges[name]

  def __contains__(self, name) -> bool:
    return name in self._ranges

  def set_range(self, name: str, value: List[int]):
    """Overwrite or create a custom range, which may intersect with others."""
    self._ranges[name] = value

  def range(self, name: str) -> List[int]:
    """Alias for ``self[name]``."""
    return self.__getitem__(name)

  def index(self, name: str) -> int:
    """Return the single index of *name*; error if it spans several."""
    indices = self.__getitem__(name)
    if len(indices) != 1:
      raise ValueError(f"{name} has multiple values")
    return indices[0]

  def count(self, name: str) -> int:
    """Number of indices covered by *name*."""
    return len(self._ranges[name])

  def names(self) -> Iterable[str]:
    return self._ranges.keys()

  def ranges(self) -> Iterable[Tuple[str, List[int]]]:
    return self._ranges.items()

  def counts(self) -> Mapping[str, int]:
    return {name: len(rng) for name, rng in self._ranges.items()}

  @property
  def size(self) -> int:
    return self._size

  def named_array(self, array: np.ndarray) -> "NamedArray":
    return NamedArray(array, self)

  def new_named_array(self) -> "NamedArray":
    return NamedArray(np.zeros((self.size,)), self)

  def new_random_named_array(self) -> "NamedArray":
    return NamedArray(np.random.uniform(size=(self.size,)), self)
class NamedArray:
  """Given a numpy array and a NamedRange, access slices by name."""

  def __init__(self, array: np.ndarray, names: NamedRanges):
    if array.shape != (names.size,):
      raise ValueError(f"Wrong sizes: {array.shape} != ({names.size},)")
    self._array = array
    self._names = names

  def _resolve(self, name):
    """Map a key, or a (key, sub-index) pair, to concrete array indices."""
    if isinstance(name, str):
      return self._names[name]
    key, sub = name
    return np.array(self._names[key])[sub]

  def __getitem__(
      self, name: Union[str, Tuple[str, Union[int, List[int],
                                              slice]]]) -> np.ndarray:
    """Return a read-only view into the array by name."""
    arr = self._array[self._resolve(name)]
    if not np.isscalar(arr):
      # Read-only because it's indexed by an array of potentially
      # non-contiguous indices, which forces a copy — writes to the copy
      # would silently not update the underlying array.
      arr.flags.writeable = False
    return arr

  def __setitem__(
      self, name: Union[str, Tuple[str, Union[int, List[int], slice]]], value):
    """Set one or more values of a range to a value."""
    self._array[self._resolve(name)] = value

  @property
  def array(self) -> np.ndarray:
    return self._array

  @property
  def names(self) -> NamedRanges:
    return self._names

  def to_dict(self) -> Mapping[str, np.ndarray]:
    return {key: self[key] for key in self._names.names()}
|
{
"content_hash": "a7a18fb256664be501df6cbfecd3a042",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 80,
"avg_line_length": 30.17543859649123,
"alnum_prop": 0.6232558139534884,
"repo_name": "deepmind/deepmind-research",
"id": "857573c79a2c15c4ed3c70eb338f7e63e182b890",
"size": "4035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fusion_tcv/named_array.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1002"
},
{
"name": "C++",
"bytes": "5765"
},
{
"name": "Jupyter Notebook",
"bytes": "12330730"
},
{
"name": "Lua",
"bytes": "76186"
},
{
"name": "OpenEdge ABL",
"bytes": "15630"
},
{
"name": "PureBasic",
"bytes": "8"
},
{
"name": "Python",
"bytes": "3419119"
},
{
"name": "Racket",
"bytes": "226692"
},
{
"name": "Shell",
"bytes": "84450"
},
{
"name": "Starlark",
"bytes": "3463"
}
],
"symlink_target": ""
}
|
VERSION = 10.0  # presumably the tutorial step number (server-10) — confirm
HOST = 'localhost'  # interface the development server binds to
PORT = 20010  # TCP port the development server listens on
# This is the path to the upload directory (relative to the server's cwd)
UPLOAD_FOLDER = 'uploads/'
# These are the file extensions that we accept for upload
ALLOWED_EXTENSIONS = ['txt', 'md', 'pdf', 'doc', 'docx', 'xls', 'xlsx', 'png', 'jpg', 'jpeg', 'gif']
# Should the database be reinitialized each time the server is restarted?
DB_SEED = True
|
{
"content_hash": "4291bf3bacad456e06b353ae894c0dec",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 100,
"avg_line_length": 34.36363636363637,
"alnum_prop": 0.6772486772486772,
"repo_name": "FroeMic/CDTM-Backend-Workshop-WT2016",
"id": "0000b497ea3558987741cabbec01dd68a665cbf0",
"size": "449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solutions/server/server-10-upload-files/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2309764"
},
{
"name": "HTML",
"bytes": "204484"
},
{
"name": "JavaScript",
"bytes": "4859190"
},
{
"name": "Python",
"bytes": "187974"
}
],
"symlink_target": ""
}
|
from .manager import manager

# Re-export the manager blueprint under the generic name ``bp`` — presumably
# so each blueprint package exposes a uniformly named object; verify against
# the app's blueprint registration code.
bp = manager
|
{
"content_hash": "669881c6fefc9c3de6c517daddb1e912",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 28,
"avg_line_length": 14.333333333333334,
"alnum_prop": 0.7674418604651163,
"repo_name": "IfengAutomation/mockserver",
"id": "da221fc5f27ee3b09553856141da7ecc0aca11ae",
"size": "43",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mockserver/blueprints/manager/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7057"
},
{
"name": "CoffeeScript",
"bytes": "2660"
},
{
"name": "HTML",
"bytes": "6446"
},
{
"name": "JavaScript",
"bytes": "2841"
},
{
"name": "Python",
"bytes": "15664"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function
from hashlib import sha256
import hmac
import logging
import six
from simplejson import JSONDecodeError
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from django.utils.crypto import constant_time_compare
from django.utils.decorators import method_decorator
from sentry.api import client
from sentry.models import ApiKey, Project, ProjectOption
from sentry.plugins import plugins
from sentry.utils import json
logger = logging.getLogger('sentry.webhooks')
class ReleaseWebhookView(View):
    """Receives release webhooks from external deploy tooling.

    Requests are authenticated by an HMAC-SHA256 signature derived from the
    project's ``sentry:release-token`` option, then dispatched either to the
    builtin releases API endpoint or to a plugin-provided release hook.
    """

    def verify(self, plugin_id, project_id, token, signature):
        """Return True when *signature* is the HMAC-SHA256 of
        "<plugin_id>-<project_id>" keyed with the project's release token."""
        return constant_time_compare(signature, hmac.new(
            key=token.encode('utf-8'),
            msg=('{}-{}'.format(plugin_id, project_id)).encode('utf-8'),
            digestmod=sha256
        ).hexdigest())

    @method_decorator(csrf_exempt)
    def dispatch(self, *args, **kwargs):
        # Webhook callers cannot supply a CSRF token, so exempt this view.
        return super(ReleaseWebhookView, self).dispatch(*args, **kwargs)

    def _handle_builtin(self, request, project):
        """Proxy the JSON payload to the releases API endpoint for *project*."""
        endpoint = '/projects/{}/{}/releases/'.format(
            project.organization.slug,
            project.slug,
        )
        try:
            data = json.loads(request.body)
        except JSONDecodeError as exc:
            return HttpResponse(
                status=400,
                content=json.dumps({'error': six.text_type(exc)}),
                content_type='application/json',
            )
        try:
            # Ideally the API client would support some kind of god-mode here
            # as we've already confirmed credentials and simply want to execute
            # the view code. Instead we hack around it with an ApiKey instance
            god = ApiKey(
                organization=project.organization,
                scopes=getattr(ApiKey.scopes, 'project:write'),
            )
            resp = client.post(
                endpoint,
                data=data,
                auth=god,
            )
        except client.ApiError as exc:
            return HttpResponse(
                status=exc.status_code,
                content=json.dumps(exc.body),
                content_type='application/json',
            )
        return HttpResponse(
            status=resp.status_code,
            content=json.dumps(resp.data),
            content_type='application/json',
        )

    def post(self, request, plugin_id, project_id, signature):
        project = Project.objects.get_from_cache(id=project_id)
        token = ProjectOption.objects.get_value(project, 'sentry:release-token')

        logger.info('Incoming webhook for project_id=%s, plugin_id=%s',
                    project_id, plugin_id)

        # Bug fix: a project without a configured release token previously
        # crashed in verify() (AttributeError on token.encode) and returned
        # a 500; treat it as an authentication failure instead.
        if token is None:
            logger.warning('Release token not configured for project_id=%s',
                           project_id)
            return HttpResponse(status=403)

        if not self.verify(plugin_id, project_id, token, signature):
            # logger.warn is a deprecated alias of logger.warning
            logger.warning('Unable to verify signature for release hook')
            return HttpResponse(status=403)

        if plugin_id == 'builtin':
            return self._handle_builtin(request, project)

        plugin = plugins.get(plugin_id)
        if not plugin.is_enabled(project):
            logger.warning(
                'Disabled release hook received for project_id=%s, plugin_id=%s',
                project_id, plugin_id)
            return HttpResponse(status=403)

        cls = plugin.get_release_hook()
        hook = cls(project)
        hook.handle(request)
        return HttpResponse(status=204)
|
{
"content_hash": "f10baa78bb81ef675d6864c59e180a99",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 89,
"avg_line_length": 34.24752475247525,
"alnum_prop": 0.6134721017635155,
"repo_name": "zenefits/sentry",
"id": "301a09627b262814120264adc7b59c45f2a22a3a",
"size": "3459",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/sentry/web/frontend/release_webhook.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "249557"
},
{
"name": "HTML",
"bytes": "293019"
},
{
"name": "JavaScript",
"bytes": "975797"
},
{
"name": "Lua",
"bytes": "22367"
},
{
"name": "Makefile",
"bytes": "5959"
},
{
"name": "Python",
"bytes": "12550461"
},
{
"name": "Ruby",
"bytes": "4026"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
from gloria.service.decorator import task, Property
@task()
class Svc4Task0:
    '''Svc4Job0 description: demo task exposing an int and a bool property.'''

    def __init__(self):
        print(self.__class__.__name__ + ' __init__')
        self._my_int_property = 1
        self._my_bool_property = False

    @Property
    def my_int_property(self):
        return self._my_int_property

    @my_int_property.setter
    def my_int_property(self, value):
        print('Changing value from {0} to {1}'.format(self._my_int_property, value))
        self._my_int_property = value

    @Property
    def my_bool_property(self):
        return self._my_bool_property

    def run(self):
        # Bug fix: this was a Python 2 print *statement* (a SyntaxError on
        # Python 3, which the rest of this file already targets).
        print('Hello from {0}'.format(self.__class__.__name__))
|
{
"content_hash": "27c7ac198537d00a4873ee65f2044082",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 92,
"avg_line_length": 30.807692307692307,
"alnum_prop": 0.5280898876404494,
"repo_name": "unix-beard/gloria",
"id": "454563859cf51a3763fb8868cd806603bdaf8350",
"size": "801",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3_port",
"path": "test/services/svc4/jobs/svc4job0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92050"
}
],
"symlink_target": ""
}
|
from random import random

# Write one million uniform [0, 1) samples, one per line, to random.txt.
with open('random.txt', 'w') as out_file:
    for _ in range(1000000):
        print('{:6f}'.format(random()), file=out_file)
|
{
"content_hash": "c11fd24f5c2c6b8de7ea5fe6db909a1c",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 47,
"avg_line_length": 27.8,
"alnum_prop": 0.6115107913669064,
"repo_name": "ThomasLee969/OperatingSystem",
"id": "4156d035a9bb81bc93bcc13e82a5a44869a2db9e",
"size": "139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2-advanced-ipc/gen_randoms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "142"
},
{
"name": "C",
"bytes": "137923"
},
{
"name": "C++",
"bytes": "233868"
},
{
"name": "Python",
"bytes": "9086"
}
],
"symlink_target": ""
}
|
"""Contains benchmark imports and a list of benchmarks.
All modules within this package are considered benchmarks, and are loaded
dynamically. Add non-benchmark code to other packages.
"""
from perfkitbenchmarker import import_util
def _LoadBenchmarks():
  """Imports every module in this package and returns them as a list."""
  return list(import_util.LoadModulesForPath(__path__, __name__))


# All benchmark modules, loaded once at package import time.
BENCHMARKS = _LoadBenchmarks()

# Maps each module's BENCHMARK_NAME attribute to the module itself.
VALID_BENCHMARKS = {module.BENCHMARK_NAME: module
                    for module in BENCHMARKS}
|
{
"content_hash": "5ed3e1ff2215e0b5402a3de9c366fe1e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 73,
"avg_line_length": 28.375,
"alnum_prop": 0.7422907488986784,
"repo_name": "kivio/PerfKitBenchmarker",
"id": "399ebe75683b8b32ca203078b8b864a05ae4f76b",
"size": "1064",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/benchmarks/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1128835"
},
{
"name": "Shell",
"bytes": "25650"
}
],
"symlink_target": ""
}
|
import os
import sys
from gunicorn.errors import ConfigError
from gunicorn.app.base import Application
from gunicorn.app import djangoapp
from gunicorn import util
class WSGIApplication(Application):
    """Gunicorn application wrapper for generic WSGI callables.

    Accepts either a single ``module:app`` URI argument or a PasteDeploy
    config file via ``--paste``.
    """

    def init(self, parser, opts, args):
        # `opts.paste and opts.paste is not None` was redundant — both
        # conditions reduce to the truthiness of opts.paste.
        if opts.paste:
            path = os.path.abspath(os.path.normpath(
                os.path.join(util.getcwd(), opts.paste)))

            if not os.path.exists(path):
                # Bug fix: previously raised with the undefined name ``val``,
                # which produced a NameError instead of this ConfigError.
                raise ConfigError("%r not found" % path)

            # paste application, load the config
            self.cfgurl = 'config:%s' % path
            self.relpath = os.path.dirname(path)

            from .pasterapp import paste_config
            return paste_config(self.cfg, self.cfgurl, self.relpath)

        if len(args) != 1:
            parser.error("No application module specified.")

        self.cfg.set("default_proc_name", args[0])
        self.app_uri = args[0]

    def chdir(self):
        """Switch to the configured directory and make it importable."""
        # chdir to the configured path before loading,
        # default is the current dir
        os.chdir(self.cfg.chdir)

        # add the path to sys.path
        sys.path.insert(0, self.cfg.chdir)

    def load_wsgiapp(self):
        """Import and return the WSGI callable named by the app URI."""
        self.chdir()

        # load the app
        return util.import_app(self.app_uri)

    def load_pasteapp(self):
        """Load and return the WSGI app described by the paste config."""
        self.chdir()

        # load the paste app
        from .pasterapp import load_pasteapp
        return load_pasteapp(self.cfgurl, self.relpath, global_conf=None)

    def load(self):
        """Return the WSGI callable, from paste config if one was given."""
        if self.cfg.paste is not None:
            return self.load_pasteapp()
        else:
            return self.load_wsgiapp()
def run():
    """\
    The ``gunicorn`` command line runner for launching Gunicorn with
    generic WSGI applications.
    """
    # Imported lazily so importing this module stays side-effect free.
    from gunicorn.app.wsgiapp import WSGIApplication
    WSGIApplication("%(prog)s [OPTIONS] [APP_MODULE]").run()


if __name__ == '__main__':
    run()
|
{
"content_hash": "95b8a02068ecb48d071aa6fa1f5586be",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 73,
"avg_line_length": 27.4,
"alnum_prop": 0.6100104275286757,
"repo_name": "bertucho/moviestalk",
"id": "e772cee3f5eec88356aef50b75521cbce0000d85",
"size": "2046",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/gunicorn/app/wsgiapp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "JavaScript",
"bytes": "2338"
},
{
"name": "Python",
"bytes": "24409"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
}
|
from suplemon.linelight.color_map import color_map
class Syntax:
    """Minimal line-light syntax definition for Lua."""

    def get_comment(self):
        """Return the (prefix, suffix) pair used to comment out a line."""
        return ("-- ", "")

    def get_color(self, raw_line):
        """Return the display color for *raw_line* (always white for Lua)."""
        return color_map["white"]
|
{
"content_hash": "567c5c6ec2b7cf8d4ab8d3c4cc47fe7b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 50,
"avg_line_length": 21.3,
"alnum_prop": 0.6009389671361502,
"repo_name": "richrd/suplemon",
"id": "3a33921d14902852ebf162b6ce5e2a4714d14c1f",
"size": "213",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "suplemon/linelight/lua.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "221419"
},
{
"name": "Shell",
"bytes": "1057"
}
],
"symlink_target": ""
}
|
from distutils.core import setup

# NOTE(review): ``install_requires`` is a setuptools extension that
# distutils.core.setup silently ignores — presumably this should import
# setup from setuptools instead; verify before relying on the dependency
# declaration below.
setup(name='pt-law-downloader',
      version='1.0.0',
      description='Downloader of the official texts of the portuguese law.',
      long_description=open('README.md').read(),
      author='Jorge C. Leitão',
      url='https://github.com/jorgecarleitao/pt_law_downloader',
      py_modules=['pt_law_downloader'],
      classifiers=[
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3',
          'Topic :: Software Development :: Libraries :: Python Modules',
      ],
      install_requires=['Beautifulsoup4'])
|
{
"content_hash": "d059421d8359e44482338e40f9938748",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 76,
"avg_line_length": 40.25,
"alnum_prop": 0.6273291925465838,
"repo_name": "publicos-pt/pt_law_downloader",
"id": "5a9c43a3abde333b05ea7c3604d2f1e58835d77a",
"size": "645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13249"
}
],
"symlink_target": ""
}
|
def lovefunc(flower, flower2):
    """True when exactly one of the two petal counts is odd."""
    return (flower % 2) != (flower2 % 2)
|
{
"content_hash": "39efa9296e2b7f3df8f8e30376c38ad3",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 38,
"avg_line_length": 35,
"alnum_prop": 0.6428571428571429,
"repo_name": "the-zebulan/CodeWars",
"id": "c5bfd11b12cfac832b63a450c555c610d092a828",
"size": "70",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katas/kyu_8/opposites_attract.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
}
|
import numpy as np
from asap3.MonteCarlo.Base import GlobalOptimizer
from ase.units import kB
from ase.optimize import LBFGS
class BasinHopping(GlobalOptimizer):
    """Basin-hopping global optimizer.

    After each Monte Carlo move (performed by the GlobalOptimizer base) the
    trial structure is locally relaxed, and acceptance is decided with a
    Metropolis criterion on the relaxed energies.
    """

    def __init__(self, atoms, log='-', traj=None, localopt=LBFGS, fmax=1e-2):
        # localopt: local relaxation class; fmax: its force-convergence
        # criterion (presumably eV/Å, the ASE convention — TODO confirm).
        GlobalOptimizer.__init__(self, atoms, log, traj)
        self.localoptimizer = localopt
        self.fmax = fmax
        self.Eo = atoms.get_potential_energy()  # energy of the accepted state
        self.En = 1.0e32  # trial energy; sentinel until the first move
        self.P = 0.0  # last Metropolis acceptance probability
        self.log_header = ('%-15s %-12s %-6s %-7s\n' %
                           ('Energy', 'Probability', 'Accept', 'OptStep'))

    def initialize(self, temp=1000.0):
        # temp: Metropolis temperature; used as temp * ase.units.kB below,
        # so presumably Kelvin — confirm against GlobalOptimizer's docs.
        GlobalOptimizer.initialize(self)
        self.temp = temp
        self.log_info = '\nTemperature: %i\n' % (temp,)

    def evaluate_move(self):
        # Relax the trial configuration locally before judging it.
        dyn = self.localoptimizer(self.atoms, logfile=None)
        dyn.run(fmax=self.fmax, steps=1000)
        self.En = self.atoms.get_potential_energy()
        # Metropolis acceptance on the relaxed energy difference.
        self.P = min(1.0, np.exp((self.Eo - self.En) / (self.temp * kB)))
        accept = self.P > np.random.uniform()
        self.log_string = ('%-15.6f %-12.6f %-6s %-7i\n' %
                           (self.En, self.P, accept, dyn.nsteps))
        return accept

    def accept_move(self):
        # The trial energy becomes the new reference for the next move.
        self.Eo = self.En
        GlobalOptimizer.accept_move(self)
|
{
"content_hash": "67a02b79354f101c86ccff07b5aec20f",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 77,
"avg_line_length": 32.7,
"alnum_prop": 0.5802752293577982,
"repo_name": "auag92/n2dm",
"id": "da29ae85518b642bd6e32597b3b73e820cbebf26",
"size": "1308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Asap-3.8.4/Python/asap3/MonteCarlo/Basin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4529"
},
{
"name": "C++",
"bytes": "1472384"
},
{
"name": "CSS",
"bytes": "5059"
},
{
"name": "Jupyter Notebook",
"bytes": "7328"
},
{
"name": "Makefile",
"bytes": "86067"
},
{
"name": "Matlab",
"bytes": "87"
},
{
"name": "Python",
"bytes": "1232765"
},
{
"name": "Shell",
"bytes": "13226"
},
{
"name": "Smarty",
"bytes": "4212"
},
{
"name": "TeX",
"bytes": "5561"
}
],
"symlink_target": ""
}
|
"""Syslog notification service."""
import syslog
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
CONF_FACILITY = "facility"
CONF_OPTION = "option"
CONF_PRIORITY = "priority"

# Maps user-facing configuration values to the names of the corresponding
# constants in the stdlib ``syslog`` module (resolved via getattr in
# get_service below).
SYSLOG_FACILITY = {
    "kernel": "LOG_KERN",
    "user": "LOG_USER",
    "mail": "LOG_MAIL",
    "daemon": "LOG_DAEMON",
    # Bug fix: "auth" previously mapped to LOG_KERN, routing auth messages
    # to the kernel facility instead of syslog.LOG_AUTH.
    "auth": "LOG_AUTH",
    "LPR": "LOG_LPR",
    "news": "LOG_NEWS",
    "uucp": "LOG_UUCP",
    "cron": "LOG_CRON",
    "syslog": "LOG_SYSLOG",
    "local0": "LOG_LOCAL0",
    "local1": "LOG_LOCAL1",
    "local2": "LOG_LOCAL2",
    "local3": "LOG_LOCAL3",
    "local4": "LOG_LOCAL4",
    "local5": "LOG_LOCAL5",
    "local6": "LOG_LOCAL6",
    "local7": "LOG_LOCAL7",
}

SYSLOG_OPTION = {
    "pid": "LOG_PID",
    "cons": "LOG_CONS",
    "ndelay": "LOG_NDELAY",
    "nowait": "LOG_NOWAIT",
    "perror": "LOG_PERROR",
}

# Configuration priority (higher = more severe) to syslog level name.
SYSLOG_PRIORITY = {
    5: "LOG_EMERG",
    4: "LOG_ALERT",
    3: "LOG_CRIT",
    2: "LOG_ERR",
    1: "LOG_WARNING",
    0: "LOG_NOTICE",
    -1: "LOG_INFO",
    -2: "LOG_DEBUG",
}
# Extend the base notify platform schema with syslog-specific options,
# each restricted to the keys of the mapping tables above.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_FACILITY, default="syslog"): vol.In(SYSLOG_FACILITY.keys()),
        vol.Optional(CONF_OPTION, default="pid"): vol.In(SYSLOG_OPTION.keys()),
        vol.Optional(CONF_PRIORITY, default=-1): vol.In(SYSLOG_PRIORITY.keys()),
    }
)
def get_service(hass, config, discovery_info=None):
    """Get the syslog notification service.

    Resolves each configured option name to its ``syslog`` module constant.
    """
    resolved = [
        getattr(syslog, table[config.get(key)])
        for key, table in (
            (CONF_FACILITY, SYSLOG_FACILITY),
            (CONF_OPTION, SYSLOG_OPTION),
            (CONF_PRIORITY, SYSLOG_PRIORITY),
        )
    ]
    return SyslogNotificationService(*resolved)
class SyslogNotificationService(BaseNotificationService):
    """Notification service that forwards messages to the local syslog."""

    def __init__(self, facility, option, priority):
        """Store the pre-resolved syslog facility, option and priority."""
        self._facility = facility
        self._option = option
        self._priority = priority

    def send_message(self, message="", **kwargs):
        """Send a message to syslog, using the title as the log ident."""
        ident = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
        syslog.openlog(ident, self._option, self._facility)
        syslog.syslog(self._priority, message)
        syslog.closelog()
|
{
"content_hash": "a6c5ff087aa00cd46f84dd225dd0620d",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 86,
"avg_line_length": 26.097826086956523,
"alnum_prop": 0.6197417742607247,
"repo_name": "nkgilley/home-assistant",
"id": "5a5bac002e42131bb0ff45bfeaefc23eda059678",
"size": "2401",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/syslog/notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
# Standard Ansible module metadata consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_pim_rp_address
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages configuration of an PIM static RP address instance.
description:
- Manages configuration of an Protocol Independent Multicast (PIM) static
rendezvous point (RP) address instance.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(state=absent) is currently not supported on all platforms.
options:
rp_address:
description:
- Configures a Protocol Independent Multicast (PIM) static
rendezvous point (RP) address. Valid values are
unicast addresses.
required: true
group_list:
description:
- Group range for static RP. Valid values are multicast addresses.
required: false
default: null
prefix_list:
description:
- Prefix list policy for static RP. Valid values are prefix-list
policy names.
required: false
default: null
route_map:
description:
- Route map policy for static RP. Valid values are route-map
policy names.
required: false
default: null
bidir:
description:
- Group range is treated in PIM bidirectional mode.
required: false
choices: ['true','false']
default: null
'''
EXAMPLES = '''
- nxos_pim_rp_address:
rp_address: "10.1.1.20"
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "vrf test", "router-id 1.1.1.1"]
'''
import re
from ansible.module_utils.nxos import get_config, load_config
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
def get_existing(module, args):
    """Parse the running config for the static RP entry matching rp_address.

    Returns a dict containing whichever of 'bidir', 'route_map',
    'prefix_list' and 'group_list' are currently configured for the address.
    (``args`` is accepted for signature compatibility but not used here.)
    """
    existing = {}
    config = str(get_config(module))
    address = module.params['rp_address']

    arg_name_by_keyword = {
        'route-map': 'route_map',
        'prefix-list': 'prefix_list',
        'group-list': 'group_list',
    }
    for line in re.findall(r'ip pim rp-address (?P<value>.*)$', config, re.M):
        tokens = line.split()
        if tokens[0] != address:
            continue
        # 'bidir' stays True once it has been seen on any matching line.
        existing['bidir'] = existing.get('bidir') or 'bidir' in line
        if len(tokens) > 2:
            arg_name = arg_name_by_keyword.get(tokens[1])
            if arg_name:
                existing[arg_name] = tokens[2]
    return existing
def state_present(module, existing, proposed, candidate):
    """Queue the command that configures the static RP with proposed options."""
    base_command = 'ip pim rp-address {0}'.format(module.params['rp_address'])
    commands = build_command(proposed, base_command)
    if commands:
        candidate.add(commands, parents=[])
def build_command(param_dict, command):
    """Append any scoping options and the bidir flag to *command*.

    Returns the result as a single-element command list.
    """
    parts = [command]
    for option in ('group_list', 'prefix_list', 'route_map'):
        value = param_dict.get(option)
        if value:
            parts.append('{0} {1}'.format(option.replace('_', '-'), value))
    if param_dict.get('bidir'):
        parts.append('bidir')
    return [' '.join(parts)]
def state_absent(module, existing, candidate):
    """Queue the command that removes the static RP entry.

    When a group-list is configured, the full option string must be repeated
    on the 'no' command for the device to accept it.
    """
    base_command = 'no ip pim rp-address {0}'.format(module.params['rp_address'])
    if existing.get('group_list'):
        commands = build_command(existing, base_command)
    else:
        commands = [base_command]
    candidate.add(commands, parents=[])
def main():
    """Module entry point: diff desired vs. existing RP config and apply."""
    argument_spec = dict(
        rp_address=dict(required=True, type='str'),
        group_list=dict(required=False, type='str'),
        prefix_list=dict(required=False, type='str'),
        route_map=dict(required=False, type='str'),
        bidir=dict(required=False, type='bool'),
        state=dict(choices=['present', 'absent'], default='present', required=False),
    )
    argument_spec.update(nxos_argument_spec)

    # group-list / prefix-list / route-map are alternative ways to scope
    # the RP, so at most one may be supplied.
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['group_list', 'route_map'],
                                               ['group_list', 'prefix_list'],
                                               ['route_map', 'prefix_list']],
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)
    result = {'changed': False, 'commands': [], 'warnings': warnings}

    state = module.params['state']

    args = [
        'rp_address',
        'group_list',
        'prefix_list',
        'route_map',
        'bidir'
    ]

    existing = get_existing(module, args)
    proposed_args = dict((k, v) for k, v in module.params.items()
                         if v is not None and k in args)

    # Keep only the options that differ from the device's current config,
    # normalizing string booleans ('true'/'false') to real booleans first.
    proposed = {}
    for key, value in proposed_args.items():
        if key != 'rp_address':
            if str(value).lower() == 'true':
                value = True
            elif str(value).lower() == 'false':
                value = False
            if existing.get(key) != value:
                proposed[key] = value

    candidate = CustomNetworkConfig(indent=3)
    if state == 'present' and (proposed or not existing):
        state_present(module, existing, proposed, candidate)
    elif state == 'absent' and existing:
        state_absent(module, existing, candidate)

    if candidate:
        candidate = candidate.items_text()
        result['commands'] = candidate
        result['changed'] = True
        # Push the accumulated commands to the device.
        load_config(module, candidate)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
{
"content_hash": "14291c1ddad3258a54d514a670ddb825",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 85,
"avg_line_length": 29.910526315789475,
"alnum_prop": 0.600739046278374,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "dba519c02db5a4c3db2b85d76da18cd3e0f6a9a7",
"size": "6358",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/nxos/nxos_pim_rp_address.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, absolute_import
from django.contrib.auth.models import AbstractUser
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class User(AbstractUser):
    """Project user model: Django's AbstractUser plus a free-form name."""

    # First Name and Last Name do not cover name patterns
    # around the globe.
    name = models.CharField(_("Name of User"), blank=True, max_length=255)

    def __str__(self):
        return self.username

    def get_absolute_url(self):
        """Return the URL of this user's detail page (keyed by username)."""
        return reverse('users:detail', kwargs={'username': self.username})

    def get_full_name(self):
        # NOTE(review): ignores the custom ``name`` field above — confirm
        # whether it should be preferred when set.
        return "{0} {1}".format(self.first_name, self.last_name)

    def get_short_name(self):
        # NOTE(review): returns the *last* name as the short name — confirm
        # this is intended.
        return self.last_name
|
{
"content_hash": "16ace9c2bb9d8fb0142eed1443fa5fc0",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 30.88888888888889,
"alnum_prop": 0.7146282973621103,
"repo_name": "jeuvreyl/viewer-django",
"id": "0b249e28aec0867bd33d249a9bf71b2212561b46",
"size": "858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "viewer/users/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6986"
},
{
"name": "CSS",
"bytes": "1473"
},
{
"name": "HTML",
"bytes": "20300"
},
{
"name": "JavaScript",
"bytes": "3142"
},
{
"name": "Makefile",
"bytes": "7409"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "53641"
},
{
"name": "Shell",
"bytes": "4535"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import Experiment, Test
class TestInline(admin.TabularInline):
    """Inline editor for the Tests attached to an Experiment."""

    model = Test
    max_num = 2  # presumably an A/B experiment: at most two variants
    extra = 2
class ExperimentAdmin(admin.ModelAdmin):
    """Admin configuration for Experiment, with its Tests editable inline."""

    inlines = (TestInline, )
    list_display = ('name', 'start', 'end', 'is_active', )
    search_fields = ('name', )
    list_filter = ('start', 'end', 'is_active', )
    ordering = ('-start', )  # newest experiments first


admin.site.register(Experiment, ExperimentAdmin)
|
{
"content_hash": "18e0cd8197eec0dffc40d94bb2445cac",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 58,
"avg_line_length": 23.9,
"alnum_prop": 0.6401673640167364,
"repo_name": "htadg/ABToast",
"id": "a778cac1db6863477ed4fcc4d8a73803f9d600b3",
"size": "478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ABToast/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8209"
}
],
"symlink_target": ""
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattercarpet.marker.colorbar"
_path_str = "scattercarpet.marker.colorbar.tickfont"
_valid_props = {"color", "family", "size"}
    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color (any of the standard CSS color
            keywords, e.g. 'red', 'steelblue', 'rebeccapurple')

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        # Assignment is validated by the plotly base-type machinery.
        self["color"] = val
    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        # Assignment is validated by the plotly base-type machinery.
        self["family"] = val
    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        # Assignment is validated by the plotly base-type machinery.
        self["size"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Plain-text property summary consumed at runtime (e.g. in error
        # messages); the literal below is returned as-is and must stay in
        # sync with the property docstrings above.
        return """\
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size
        """
    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Tickfont object
        Sets the color bar's tick label font
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scattercarpet.
            marker.colorbar.Tickfont`
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size
        Returns
        -------
        Tickfont
        """
        super(Tickfont, self).__init__("tickfont")
        # Internal fast path used when re-parenting an existing object:
        # adopt the parent and skip all validation/population below.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow copy so popping consumed keys below doesn't mutate
            # the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattercarpet.marker.colorbar.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.marker.colorbar.Tickfont`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in ``arg``.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
{
"content_hash": "40c2d05a90244f094b1aa727d0e26abf",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 83,
"avg_line_length": 37.61233480176212,
"alnum_prop": 0.561841180604357,
"repo_name": "plotly/python-api",
"id": "82bb0600607127d9a15d5e5132f020191a0e5c1a",
"size": "8538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/scattercarpet/marker/colorbar/_tickfont.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
import curses
import calendar
import gevent
import time
import global_mod as g
import getstr
def draw_window(state, window):
    """Draw the block-explorer header window.

    Shows summary data (hash, merkle root, size, difficulty, timestamp,
    version) for the currently-browsed block when it is cached in
    state['blocks'], otherwise a placeholder prompting for 'G'.
    Fix: the two identical "no block information loaded" branches are
    collapsed into one.
    """
    window.clear()
    window.refresh()
    win_header = curses.newwin(5, 75, 0, 0)
    blocks = state['blocks']
    # Block data is keyed by the *string* form of the height.
    height = str(blocks['browse_height']) if 'browse_height' in blocks else None
    if height is not None and height in blocks:
        blockdata = blocks[height]
        win_header.addstr(0, 1, "height: " + height.zfill(6) + " (J/K: browse, HOME/END: quicker, L: latest, G: seek)", curses.A_BOLD)
        win_header.addstr(1, 1, "hash: " + blockdata['hash'], curses.A_BOLD)
        win_header.addstr(2, 1, "root: " + blockdata['merkleroot'], curses.A_BOLD)
        # Python 2 integer division for the KB figure.
        win_header.addstr(3, 1, str(blockdata['size']) + " bytes (" + str(blockdata['size']/1024) + " KB) ", curses.A_BOLD)
        win_header.addstr(3, 26, "diff: " + "{:,d}".format(int(blockdata['difficulty'])), curses.A_BOLD)
        win_header.addstr(3, 52, time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(blockdata['time'])), curses.A_BOLD)
        win_header.addstr(4, 51, ("v" + str(blockdata['version'])).rjust(20), curses.A_BOLD)
        draw_transactions(state)
        blocks['loaded'] = 1
    else:
        # Either no height selected yet, or the block isn't cached.
        win_header.addstr(0, 1, "no block information loaded", curses.A_BOLD + curses.color_pair(3))
        win_header.addstr(1, 1, "press 'G' to enter a block hash, height, or timestamp", curses.A_BOLD)
    win_header.refresh()
def draw_transactions(state):
    """Draw the scrollable transaction list for the currently-browsed block."""
    # Header (5 rows) plus a one-line footer are subtracted from terminal height.
    window_height = state['y'] - 6
    win_transactions = curses.newwin(window_height, 75, 5, 0)
    height = str(state['blocks']['browse_height'])
    blockdata = state['blocks'][height]
    tx_count = len(blockdata['tx'])
    # Python 2 integer division: average bytes per transaction.
    bytes_per_tx = blockdata['size'] / tx_count
    win_transactions.addstr(0, 1, "Transactions: " + ("% 4d" % tx_count + " (" + str(bytes_per_tx) + " bytes/tx)").ljust(26) + "(UP/DOWN: scroll, ENTER: view)", curses.A_BOLD + curses.color_pair(5))
    # reset cursor if it's been resized off the bottom
    if state['blocks']['cursor'] > state['blocks']['offset'] + (window_height-2):
        state['blocks']['offset'] = state['blocks']['cursor'] - (window_height-2)
    offset = state['blocks']['offset']
    for index in range(offset, offset+window_height-1):
        if index < len(blockdata['tx']):
            if index == state['blocks']['cursor']:
                win_transactions.addstr(index+1-offset, 1, ">", curses.A_REVERSE + curses.A_BOLD)
            # Show "..." on the first/last visible row when there are more
            # entries beyond that edge of the window.
            condition = (index == offset+window_height-2) and (index+1 < len(blockdata['tx']))
            condition = condition or ( (index == offset) and (index > 0) )
            if condition:
                win_transactions.addstr(index+1-offset, 3, "...")
            else:
                win_transactions.addstr(index+1-offset, 3, blockdata['tx'][index])
    win_transactions.refresh()
def draw_input_window(state, window, rpcc):
    """Prompt for a block hash or height and request it over RPC.

    Accepts a 64-character hash or a numeric height up to the current
    chain tip; valid input switches the UI to 'block' mode, invalid
    input flashes an error and returns to 'monitor' mode.
    Fix: the long-dead commented-out timestamp-seek code (triple-quoted
    block) has been removed.
    """
    color = curses.color_pair(1)
    if 'testnet' in state:
        if state['testnet']: color = curses.color_pair(2)
    window.clear()
    window.addstr(0, 1, "bitcoind-ncurses " + g.version + " [block input mode]", color + curses.A_BOLD)
    window.addstr(1, 1, "please enter block height or hash", curses.A_BOLD)
    window.refresh()
    entered_block = getstr.getstr(67, 4, 1) # w, y, x
    if len(entered_block) == 64:
        # Looks like a full block hash; fetch it directly.
        rpcc.request("getblock", entered_block)
        window.addstr(5, 1, "waiting for block (will stall here if not found)", color + curses.A_BOLD)
        window.refresh()
        state['mode'] = "block"
    elif (len(entered_block) < 7) and entered_block.isdigit() and (int(entered_block) <= state['mininginfo']['blocks']):
        if entered_block in state['blocks']:
            # Block already cached; just switch the view.
            state['blocks']['browse_height'] = int(entered_block)
            state['mode'] = "block"
            draw_window(state, window)
        else:
            rpcc.request("getblockhash", int(entered_block))
            window.addstr(5, 1, "waiting for block (will stall here if not found)", color + curses.A_BOLD)
            window.refresh()
            state['mode'] = "block"
            state['blocks']['browse_height'] = int(entered_block)
    else:
        window.addstr(5, 1, "not a valid hash, height, or timestamp format", color + curses.A_BOLD)
        window.refresh()
        gevent.sleep(0.5)
        window.clear()
        window.refresh()
        state['mode'] = "monitor"
|
{
"content_hash": "d0dbc0cee44b446ac80eb1807c7d47b3",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 198,
"avg_line_length": 40.68888888888889,
"alnum_prop": 0.597669761514655,
"repo_name": "esotericnonsense/bitcoind-ncurses",
"id": "945a3b3b113d51cecbabc515a71a18ff6cbf9a80",
"size": "5515",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "block.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "86131"
}
],
"symlink_target": ""
}
|
import numpy as np
from IPython import embed
from teafacto.blocks.seq.oldseqproc import SimpleVec2Idx, MemVec2Idx
from teafacto.blocks.seq.enc import SimpleSeq2Vec
from teafacto.blocks.seq.enc import Seq2Idx, Seq2Vec
from teafacto.blocks.lang.wordvec import Glove
from teafacto.blocks.memory import LinearGateMemAddr, DotMemAddr
from teafacto.scripts.simplequestions.subjdet import _readdata, getmemdata as gmd
from teafacto.util import argprun
def readdata(p):
    """Load the dataset splits plus the word dictionary and label dictionary."""
    train, valid, test, extras, newdic = _readdata(p)
    worddic = extras["worddic"]
    return train, valid, test, worddic, newdic
def getmemdata(reldic, worddic, labelp="../../../data/simplequestions/labels.map"):    # updates worddic with words found in relation
    """Thin wrapper around subjdet.getmemdata; mutates ``worddic`` in place
    with any new words found in the relation labels."""
    return gmd(reldic, worddic, labelp)
def toglove(wordmat, worddic, dim=50):  # TOREMOVE
    """Diagnostic helper (marked TOREMOVE): counts how many token ids in
    ``wordmat`` have no Glove embedding, remaps ids into Glove's index
    space, then drops into an IPython shell (embed()) for inspection.
    Python 2 only: uses a tuple-unpacking lambda and print statements."""
    g = Glove(dim)
    gws = set(g.D.keys())
    wdws = set(worddic.keys())
    # Words present in worddic but unknown to Glove.
    diff = wdws.difference(gws)
    # gather states about diff
    diffcounts = {worddic[k]: 0 for k in diff}
    total = 0
    moretal = 0
    for i in range(wordmat.shape[0]):
        for j in range(wordmat.shape[1]):
            # Negative ids are padding and are skipped.
            if wordmat[i, j] >= 0:
                total += 1
                if wordmat[i, j] in diffcounts:
                    diffcounts[wordmat[i, j]] += 1
                    moretal += 1
    diffcounts = sorted(diffcounts.items(), key=lambda (k, v): v, reverse=True)
    print "%d words unknown by Glove of %d total words" % (moretal, total)
    revdic = {v: k for k, v in worddic.items()}
    # Map an old id to its Glove id; unknown ids pass through unchanged.
    d2g = lambda x: g * revdic[x] if x in revdic else x
    newdic = {k: d2g(v) for k, v in worddic.items()}
    newmat = np.vectorize(d2g)(wordmat)
    revgdic = {v: k for k, v in g.D.items()}
    embed()
def getdic2glove(worddic, dim=50, trainfrac=0.0):
    """Build a mapping from ``worddic``'s id space into Glove's id space.

    Returns a triple (d2g, newdic, glove): ``d2g`` converts a single old
    id (ids with no known word pass through unchanged), ``newdic`` maps
    each word to its Glove id, and ``glove`` is the embedding object.
    """
    glove = Glove(dim, trainfrac=trainfrac)
    id2word = {idx: word for word, idx in worddic.items()}
    def d2g(x):
        # Unknown ids (e.g. padding markers) are returned untouched.
        if x in id2word:
            return glove * id2word[x]
        return x
    newdic = {word: d2g(idx) for word, idx in worddic.items()}
    return d2g, newdic, glove
def evaluate(pred, gold):
    """Return the accuracy of ``pred`` against ``gold`` as a percentage (0-100)."""
    hits = np.sum(pred == gold)
    return hits * 100. / gold.shape[0]
def run(
        epochs=10,
        numbats=100,
        numsam=10000,
        lr=0.1,
        datap="../../../data/simplequestions/datamat.word.pkl",
        embdim=50,
        innerdim=200,
        wreg=0.00005,
        bidir=False,
        keepmincount=5,
        sameenc=False,
        memaddr="dot",
        memattdim=100,
        layers=1,
        embtrainfrac=0.0,
        mem=False,
        membidir=False,
        memlayers=1,
    ):
    """ Memory match-based glove-based word-level relation classification """
    # Load splits; entdic maps relation names to class ids.
    (traindata, traingold), (validdata, validgold), (testdata, testgold), worddic, entdic\
        = readdata(datap)
    # get words from relation names, update word dic
    memdata = getmemdata(entdic, worddic)
    # get glove and transform word mats to glove index space
    d2g, newdic, glove = getdic2glove(worddic, dim=embdim, trainfrac=embtrainfrac)
    traindata, validdata, testdata, memdata = \
        [np.vectorize(d2g)(x) for x in [traindata, validdata, testdata, memdata]]
    print traindata.shape, testdata.shape
    numwords = max(worddic.values()) + 1  # don't use this, use glove
    numrels = max(entdic.values()) + 1
    # Bidirectional encoders split the hidden size across the two directions.
    if bidir:
        encinnerdim = [innerdim/2]*layers
    else:
        encinnerdim = [innerdim]*layers
    wordemb = glove.block
    rnn, lastdim = SimpleSeq2Vec.makernu(embdim, encinnerdim, bidir=bidir)
    enc = Seq2Vec(wordemb, rnn, maskid=-1)
    if mem:
        # Memory decoder: encode relation-label word sequences and classify
        # by addressing those memory vectors (dot or linear-gate attention).
        memembdim = embdim
        if membidir:
            innerdim = [innerdim/2]*memlayers
        else:
            innerdim = [innerdim]*memlayers
        memrnn, memlastdim = SimpleSeq2Vec.makernu(memembdim, innerdim, bidir=membidir)
        memenc = Seq2Vec(wordemb, memrnn, maskid=-1)
        if memaddr is None or memaddr == "dot":
            memaddr = DotMemAddr
        elif memaddr == "lin":
            memaddr = LinearGateMemAddr
        dec = MemVec2Idx(memenc, memdata, memdim=innerdim, memaddr=memaddr, memattdim=memattdim)
    else:
        # Plain softmax classification head over the encoder vector.
        dec = SimpleVec2Idx(indim=innerdim, outdim=numrels)
    m = Seq2Idx(enc, dec)
    # Adagrad + L2 + gradient clipping; keep the best model on validation.
    m = m.train([traindata], traingold).adagrad(lr=lr).l2(wreg).grad_total_norm(1.0).cross_entropy()\
        .validate_on([validdata], validgold).accuracy().cross_entropy().takebest()\
        .train(numbats=numbats, epochs=epochs)
    pred = m.predict(testdata)
    print pred.shape
    evalres = evaluate(np.argmax(pred, axis=1), testgold)
    print str(evalres) + "%"
if __name__ == "__main__":
    # argprun exposes run()'s keyword arguments as command-line flags.
    argprun(run)
|
{
"content_hash": "13e5be4fb9b3ee72b03ddb1f0c212d4e",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 133,
"avg_line_length": 33.4264705882353,
"alnum_prop": 0.6315442146942367,
"repo_name": "lukovnikov/teafacto",
"id": "a98a40c303da5b660a1f837c702af1fa1521f9ad",
"size": "4546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teafacto/scripts/simplequestions/subjdetglove.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "56665"
},
{
"name": "Python",
"bytes": "819448"
},
{
"name": "Shell",
"bytes": "102"
}
],
"symlink_target": ""
}
|
import math
import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import norm
# NOTE(review): scipy.stats is imported four times under different names
# (stats, st, scipy.stats) and matplotlib.pyplot twice (plt, mlab);
# `mlab` is a misleading alias for pyplot. Consider consolidating.
import scipy.stats as stats
import scipy.stats as st
import matplotlib
import matplotlib.pyplot as plt
import re
import scipy.stats
import matplotlib.pyplot as mlab
# Python 2 script: prompt for a CSV path/keyword and load it into a
# pandas DataFrame (header row taken from the first line).
fhand = raw_input('Enter .csv file name or keyword: ')
data = pd.read_csv(fhand, header=0)
frame = pd.DataFrame(data)
|
{
"content_hash": "c2d78a1520319d5502bd517853d4d390",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 54,
"avg_line_length": 21.666666666666668,
"alnum_prop": 0.7846153846153846,
"repo_name": "lastralab/Statistics",
"id": "32fb794f54cf0b110fe6ed9e49636868ba2fe734",
"size": "484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Specialization/Personal/Rtopy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "89"
},
{
"name": "Python",
"bytes": "249488"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: redefines Profesor.organizacion's
    # choice set and widens Profesor.telefono to 75 characters.
    dependencies = [
        ('directorio', '0021_auto_20160506_1031'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profesor',
            name='organizacion',
            # Integer-backed choices; labels are the organisation names.
            field=models.IntegerField(blank=True, choices=[(0, 'Asturias'), (1, 'Génesis'), (2, 'IEP')], null=True),
        ),
        migrations.AlterField(
            model_name='profesor',
            name='telefono',
            field=models.CharField(blank=True, max_length=75, null=True),
        ),
    ]
|
{
"content_hash": "e77be2c2ba2313dccaa60ada38be9412",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 116,
"avg_line_length": 27.608695652173914,
"alnum_prop": 0.5795275590551181,
"repo_name": "josemfc/directorio-iep",
"id": "3ce2a9b488a367f4dbf423b85f5fd68674dd18c5",
"size": "708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "directorio/migrations/0022_auto_20160506_1347.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8249"
},
{
"name": "HTML",
"bytes": "85914"
},
{
"name": "JavaScript",
"bytes": "1763"
},
{
"name": "Python",
"bytes": "133601"
}
],
"symlink_target": ""
}
|
from expects import expect
RETURN_VALUE = '42'
with description('Refactoring goodies'):
    # mamba DSL: a def inside a description block becomes a helper
    # available on `self` within its examples and nested contexts.
    def a_method(self, return_value=RETURN_VALUE):
        return return_value
    with it('uses methods defined inside its context'):
        expect(self.a_method()).to.be.equal(RETURN_VALUE)
    with context('when using nested contexts'):
        with it('uses methods defined inside its parent'):
            expect(self.a_method()).to.be.equal(RETURN_VALUE)
|
{
"content_hash": "62ac03a4457e870fb434dd1e2cf83759",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 61,
"avg_line_length": 25.444444444444443,
"alnum_prop": 0.6746724890829694,
"repo_name": "jaimegildesagredo/expects_docs_mamba_formatter",
"id": "5fa58655fe1c03fad67c0bd9dcf226169991df94",
"size": "458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spec/refactoring_goodies_spec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58271"
}
],
"symlink_target": ""
}
|
"""
model resource define
@author xu_fanghui
@date 2014-09-03
@version demo 0.1
"""
from django.db import models
class Company(models.Model):
    """A company record for the demo REST API."""
    # Full legal name and a short display name; both unique.
    name = models.CharField(unique=True, max_length=255)
    simple_name = models.CharField(unique=True, max_length=60)
    address = models.CharField(max_length=255, blank=True)
    register_date = models.DateField(blank=True, null=True)
    # NOTE(review): presumably registered capital -- units/currency unclear.
    money = models.IntegerField(blank=True, null=True)
    # `type` shadows the builtin inside the class body only; harmless here.
    type = models.CharField(blank=True, max_length=255)
    description = models.TextField(blank=True)
class Sector(models.Model):
    """A department/sector belonging to exactly one Company."""
    name = models.CharField( max_length=255)
    simple_name = models.CharField(max_length=20)
    description = models.TextField(blank=True)
    company=models.ForeignKey(Company)
class Staff(models.Model):
    """An employee; may belong to several Sectors (many-to-many)."""
    name = models.CharField(max_length=255)
    en_name = models.CharField(max_length=255)
    birthday = models.DateField(blank=True, null=True)
    sex = models.CharField(max_length=2, blank=True)
    address = models.CharField(max_length=255, blank=True)
    job_name = models.CharField(max_length=100, blank=True)
    # Phone number as text, unique when present.
    telephone = models.CharField(unique=True, max_length=13, blank=True)
    email = models.EmailField()
    # NOTE(review): length 18 suggests a Chinese national ID -- confirm.
    card_id = models.CharField(max_length=18, blank=True)
    sectors=models.ManyToManyField(Sector)
|
{
"content_hash": "af8b249e772463bd4a03be03464f515c",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 72,
"avg_line_length": 36.72222222222222,
"alnum_prop": 0.716338880484115,
"repo_name": "xufanghui/djg-rest-demo",
"id": "b581ad6c3cf573b088e31ccd5f132a9c63235397",
"size": "1322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "5928"
}
],
"symlink_target": ""
}
|
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""
import operator
import os
import socket
import sys
import threading
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import versionutils
import six
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
libvirt = None
LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue" if six.PY2 else "queue")
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
# This list is for libvirt hypervisor drivers that need special handling.
# This is *not* the complete list of supported hypervisor drivers.
HV_DRIVER_QEMU = "QEMU"
HV_DRIVER_XEN = "Xen"
class DomainJobInfo(object):
    """Information about libvirt background jobs
    This class encapsulates information about libvirt
    background jobs. It provides a mapping from either
    the old virDomainGetJobInfo API which returned a
    fixed list of fields, or the modern virDomainGetJobStats
    which returns an extendable dict of fields.
    """
    # Flipped to False (class-wide) once we learn that either the remote
    # libvirtd or the local python binding lacks virDomainGetJobStats.
    _have_job_stats = True
    def __init__(self, **kwargs):
        # Every field defaults to "no job"/0 so callers can read any
        # attribute regardless of which API variant supplied the data.
        self.type = kwargs.get("type", libvirt.VIR_DOMAIN_JOB_NONE)
        self.time_elapsed = kwargs.get("time_elapsed", 0)
        self.time_remaining = kwargs.get("time_remaining", 0)
        self.downtime = kwargs.get("downtime", 0)
        self.setup_time = kwargs.get("setup_time", 0)
        self.data_total = kwargs.get("data_total", 0)
        self.data_processed = kwargs.get("data_processed", 0)
        self.data_remaining = kwargs.get("data_remaining", 0)
        self.memory_total = kwargs.get("memory_total", 0)
        self.memory_processed = kwargs.get("memory_processed", 0)
        self.memory_remaining = kwargs.get("memory_remaining", 0)
        self.memory_constant = kwargs.get("memory_constant", 0)
        self.memory_normal = kwargs.get("memory_normal", 0)
        self.memory_normal_bytes = kwargs.get("memory_normal_bytes", 0)
        self.memory_bps = kwargs.get("memory_bps", 0)
        self.disk_total = kwargs.get("disk_total", 0)
        self.disk_processed = kwargs.get("disk_processed", 0)
        self.disk_remaining = kwargs.get("disk_remaining", 0)
        self.disk_bps = kwargs.get("disk_bps", 0)
        self.comp_cache = kwargs.get("compression_cache", 0)
        self.comp_bytes = kwargs.get("compression_bytes", 0)
        self.comp_pages = kwargs.get("compression_pages", 0)
        self.comp_cache_misses = kwargs.get("compression_cache_misses", 0)
        self.comp_overflow = kwargs.get("compression_overflow", 0)
    @classmethod
    def _get_job_stats_compat(cls, dom):
        # Make the old virDomainGetJobInfo method look similar to the
        # modern virDomainGetJobStats method
        try:
            info = dom.jobInfo()
        except libvirt.libvirtError as ex:
            # When migration of a transient guest completes, the guest
            # goes away so we'll see NO_DOMAIN error code
            #
            # When migration of a persistent guest completes, the guest
            # merely shuts off, but libvirt unhelpfully raises an
            # OPERATION_INVALID error code
            #
            # Lets pretend both of these mean success
            if ex.get_error_code() in (libvirt.VIR_ERR_NO_DOMAIN,
                                       libvirt.VIR_ERR_OPERATION_INVALID):
                LOG.debug("Domain has shutdown/gone away: %s", ex)
                return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
            else:
                LOG.debug("Failed to get job info: %s", ex)
                raise
        # Positional order of the legacy jobInfo() tuple.
        return cls(
            type=info[0],
            time_elapsed=info[1],
            time_remaining=info[2],
            data_total=info[3],
            data_processed=info[4],
            data_remaining=info[5],
            memory_total=info[6],
            memory_processed=info[7],
            memory_remaining=info[8],
            disk_total=info[9],
            disk_processed=info[10],
            disk_remaining=info[11])
    @classmethod
    def for_domain(cls, dom):
        '''Get job info for the domain
        Query the libvirt job info for the domain (ie progress
        of migration, or snapshot operation)
        Returns: a DomainJobInfo instance
        '''
        if cls._have_job_stats:
            try:
                stats = dom.jobStats()
                return cls(**stats)
            except libvirt.libvirtError as ex:
                if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
                    # Remote libvirt doesn't support new API
                    LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
                    cls._have_job_stats = False
                    return cls._get_job_stats_compat(dom)
                elif ex.get_error_code() in (
                        libvirt.VIR_ERR_NO_DOMAIN,
                        libvirt.VIR_ERR_OPERATION_INVALID):
                    # Transient guest finished migration, so it has gone
                    # away completely
                    LOG.debug("Domain has shutdown/gone away: %s", ex)
                    return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
                else:
                    LOG.debug("Failed to get job stats: %s", ex)
                    raise
            except AttributeError as ex:
                # Local python binding doesn't support new API
                LOG.debug("Missing local virDomainGetJobStats: %s", ex)
                cls._have_job_stats = False
                return cls._get_job_stats_compat(dom)
        else:
            return cls._get_job_stats_compat(dom)
class Host(object):
    def __init__(self, uri, read_only=False,
                 conn_event_handler=None,
                 lifecycle_event_handler=None):
        """Create a Host wrapper around a libvirt connection.

        :param uri: libvirt connection URI
        :param read_only: open the connection read-only when True
        :param conn_event_handler: callable(enabled, reason) invoked on
            connection up/down transitions
        :param lifecycle_event_handler: callable(LifecycleEvent) invoked
            for queued domain lifecycle events
        """
        global libvirt
        # Lazy import: allows this module to be imported on systems
        # without the libvirt python binding installed.
        if libvirt is None:
            libvirt = importutils.import_module('libvirt')
        self._uri = uri
        self._read_only = read_only
        self._conn_event_handler = conn_event_handler
        self._lifecycle_event_handler = lifecycle_event_handler
        self._skip_list_all_domains = False
        self._caps = None
        self._hostname = None
        self._wrapped_conn = None
        # Guards _wrapped_conn against concurrent (re)connection attempts.
        self._wrapped_conn_lock = threading.Lock()
        self._event_queue = None
        self._events_delayed = {}
        # Note(toabctl): During a reboot of a domain, STOPPED and
        #                STARTED events are sent. To prevent shutting
        #                down the domain during a reboot, delay the
        #                STOPPED lifecycle event some seconds.
        self._lifecycle_delay = 15
    def _native_thread(self):
        """Receives async events coming in from libvirtd.
        This is a native thread which runs the default
        libvirt event loop implementation. This processes
        any incoming async events from libvirtd and queues
        them for later dispatch. This thread is only
        permitted to use libvirt python APIs, and the
        driver.queue_event method. In particular any use
        of logging is forbidden, since it will confuse
        eventlet's greenthread integration
        """
        # Runs forever; each iteration blocks inside libvirt until the
        # event loop has work, then invokes the registered callbacks.
        while True:
            libvirt.virEventRunDefaultImpl()
    def _dispatch_thread(self):
        """Dispatches async events coming in from libvirtd.
        This is a green thread which waits for events to
        arrive from the libvirt event loop thread. This
        then dispatches the events to the compute manager.
        """
        # _dispatch_events() blocks on the notification pipe, so this
        # green thread yields while idle.
        while True:
            self._dispatch_events()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self._queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
    @staticmethod
    def _test_connection(conn):
        """Return True if ``conn`` is still usable, False if it has died.

        A cheap getLibVersion() round-trip probes the connection;
        remote/RPC system errors mean it broke, anything else re-raises.
        """
        try:
            conn.getLibVersion()
            return True
        except libvirt.libvirtError as e:
            if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
                                       libvirt.VIR_ERR_INTERNAL_ERROR) and
                e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
                                         libvirt.VIR_FROM_RPC)):
                LOG.debug('Connection to libvirt broke')
                return False
            raise
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.NovaException(
_("Can not handle authentication request for %d credentials")
% len(creds))
    @staticmethod
    def _connect(uri, read_only):
        """Open a libvirt connection to ``uri`` (read-only if requested)."""
        # Advertise the credential types we accept; _connect_auth_cb only
        # succeeds when libvirt asks for none of them.
        auth = [[libvirt.VIR_CRED_AUTHNAME,
                 libvirt.VIR_CRED_ECHOPROMPT,
                 libvirt.VIR_CRED_REALM,
                 libvirt.VIR_CRED_PASSPHRASE,
                 libvirt.VIR_CRED_NOECHOPROMPT,
                 libvirt.VIR_CRED_EXTERNAL],
                Host._connect_auth_cb,
                None]
        flags = 0
        if read_only:
            flags = libvirt.VIR_CONNECT_RO
        # tpool.proxy_call creates a native thread. Due to limitations
        # with eventlet locking we cannot use the logging API inside
        # the called function.
        return tpool.proxy_call(
            (libvirt.virDomain, libvirt.virConnect),
            libvirt.openAuth, uri, auth, flags)
    def _queue_event(self, event):
        """Puts an event on the queue for dispatch.
        This method is called by the native event thread to
        put events on the queue for later dispatch by the
        green thread. Any use of logging APIs is forbidden.
        """
        # Events arriving before initialize() are silently dropped.
        if self._event_queue is None:
            return
        # Queue the event...
        self._event_queue.put(event)
        # ...then wakeup the green thread to dispatch it
        # (a single byte on the self-pipe unblocks _dispatch_events).
        c = ' '.encode()
        self._event_notify_send.write(c)
        self._event_notify_send.flush()
    def _dispatch_events(self):
        """Wait for & dispatch events from native thread
        Blocks until native thread indicates some events
        are ready. Then dispatches all queued events.
        """
        # Wait to be notified that there are some
        # events pending
        try:
            _c = self._event_notify_recv.read(1)
            assert _c
        except ValueError:
            return  # will be raised when pipe is closed
        # Process as many events as possible without
        # blocking
        last_close_event = None
        while not self._event_queue.empty():
            try:
                event = self._event_queue.get(block=False)
                if isinstance(event, virtevent.LifecycleEvent):
                    # call possibly with delay
                    self._event_emit_delayed(event)
                elif 'conn' in event and 'reason' in event:
                    # Connection-closed marker dict (see _close_callback);
                    # only the most recent one matters.
                    last_close_event = event
            except native_Queue.Empty:
                pass
        if last_close_event is None:
            return
        conn = last_close_event['conn']
        # get_new_connection may already have disabled the host,
        # in which case _wrapped_conn is None.
        with self._wrapped_conn_lock:
            if conn == self._wrapped_conn:
                reason = str(last_close_event['reason'])
                msg = _("Connection to libvirt lost: %s") % reason
                self._wrapped_conn = None
                if self._conn_event_handler is not None:
                    self._conn_event_handler(False, msg)
    def _event_emit_delayed(self, event):
        """Emit events - possibly delayed."""
        def event_cleanup(gt, *args, **kwargs):
            """Callback function for greenthread. Called
            to cleanup the _events_delayed dictionary when an event
            was called.
            """
            event = args[0]
            self._events_delayed.pop(event.uuid, None)
        # Cleanup possible delayed stop events.
        # A newer event for the same domain supersedes any pending
        # delayed STOPPED, so cancel it (handles the reboot case).
        if event.uuid in self._events_delayed.keys():
            self._events_delayed[event.uuid].cancel()
            self._events_delayed.pop(event.uuid, None)
            LOG.debug("Removed pending event for %s due to "
                      "lifecycle event", event.uuid)
        if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
            # Delay STOPPED event, as they may be followed by a STARTED
            # event in case the instance is rebooting
            id_ = greenthread.spawn_after(self._lifecycle_delay,
                                          self._event_emit, event)
            self._events_delayed[event.uuid] = id_
            # add callback to cleanup self._events_delayed dict after
            # event was called
            id_.link(event_cleanup, event)
        else:
            self._event_emit(event)
def _event_emit(self, event):
if self._lifecycle_event_handler is not None:
self._lifecycle_event_handler(event)
    def _init_events_pipe(self):
        """Create a self-pipe for the native thread to synchronize on.
        This code is taken from the eventlet tpool module, under terms
        of the Apache License v2.0.
        """
        self._event_queue = native_Queue.Queue()
        try:
            rpipe, wpipe = os.pipe()
            # Unbuffered so a single written byte is visible immediately.
            self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
            self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
        except (ImportError, NotImplementedError):
            # This is Windows compatibility -- use a socket instead
            # of a pipe because pipes don't really exist on Windows.
            sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.bind(('localhost', 0))
            sock.listen(50)
            csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            csock.connect(('localhost', sock.getsockname()[1]))
            nsock, addr = sock.accept()
            # Native-socket end for the native thread; green-socket end
            # for the eventlet dispatch thread.
            self._event_notify_send = nsock.makefile('wb', 0)
            gsock = greenio.GreenSocket(csock)
            self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
self._event_thread.setDaemon(True)
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
utils.spawn(self._dispatch_thread)
    def _get_new_connection(self):
        # call with _wrapped_conn_lock held
        """Open a fresh libvirt connection and register event callbacks.

        Always notifies _conn_event_handler of success/failure so the
        compute service can be (re-)enabled or disabled accordingly.
        """
        LOG.debug('Connecting to libvirt: %s', self._uri)
        wrapped_conn = None
        try:
            wrapped_conn = self._connect(self._uri, self._read_only)
        finally:
            # Enabling the compute service, in case it was disabled
            # since the connection was successful.
            disable_reason = None
            if not wrapped_conn:
                disable_reason = 'Failed to connect to libvirt'
            if self._conn_event_handler is not None:
                self._conn_event_handler(bool(wrapped_conn), disable_reason)
        self._wrapped_conn = wrapped_conn
        try:
            LOG.debug("Registering for lifecycle events %s", self)
            wrapped_conn.domainEventRegisterAny(
                None,
                libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                self._event_lifecycle_callback,
                self)
        except Exception as e:
            # Best effort: lack of event support is survivable.
            LOG.warn(_LW("URI %(uri)s does not support events: %(error)s"),
                     {'uri': self._uri, 'error': e})
        try:
            LOG.debug("Registering for connection events: %s", str(self))
            wrapped_conn.registerCloseCallback(self._close_callback, None)
        except (TypeError, AttributeError) as e:
            # NOTE: The registerCloseCallback of python-libvirt 1.0.1+
            # is defined with 3 arguments, and the above registerClose-
            # Callback succeeds. However, the one of python-libvirt 1.0.0
            # is defined with 4 arguments and TypeError happens here.
            # Then python-libvirt 0.9 does not define a method register-
            # CloseCallback.
            LOG.debug("The version of python-libvirt does not support "
                      "registerCloseCallback or is too old: %s", e)
        except libvirt.libvirtError as e:
            LOG.warn(_LW("URI %(uri)s does not support connection"
                         " events: %(error)s"),
                     {'uri': self._uri, 'error': e})
        return wrapped_conn
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
wrapped_conn = self._wrapped_conn
if not wrapped_conn or not self._test_connection(wrapped_conn):
wrapped_conn = self._get_new_connection()
return wrapped_conn
def get_connection(self):
"""Returns a connection to the hypervisor
This method should be used to create and return a well
configured connection to the hypervisor.
:returns: a libvirt.virConnect object
"""
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
payload = dict(ip=CONF.my_ip,
method='_connect',
reason=ex)
rpc.get_notifier('compute').error(nova_context.get_admin_context(),
'compute.libvirt.error',
payload)
raise exception.HypervisorUnavailable(host=CONF.host)
return conn
    @staticmethod
    def _libvirt_error_handler(context, err):
        # Just ignore instead of default outputting to stderr.
        # Registered via libvirt.registerErrorHandler() in initialize();
        # without this, libvirt prints every error to stderr itself.
        pass
    def initialize(self):
        """One-time libvirt setup: silence libvirt's stderr error
        reporting, install the default event loop implementation and
        start the event dispatch threads.
        """
        # NOTE(dkliban): Error handler needs to be registered before libvirt
        #                connection is used for the first time.  Otherwise,
        #                the handler does not get registered.
        libvirt.registerErrorHandler(self._libvirt_error_handler, None)
        libvirt.virEventRegisterDefaultImpl()
        self._init_events()
        self._initialized = True
    def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
                       op=operator.lt):
        """Check libvirt version, hypervisor version, and hypervisor type

        Returns False as soon as ``op(actual, wanted)`` is true for any
        supplied version constraint, or when the hypervisor type does not
        match; True only when every supplied constraint is satisfied.

        :param hv_type: hypervisor driver from the top of this file.
        """
        conn = self.get_connection()
        try:
            if lv_ver is not None:
                libvirt_version = conn.getLibVersion()
                if op(libvirt_version,
                      versionutils.convert_version_to_int(lv_ver)):
                    return False
            if hv_ver is not None:
                hypervisor_version = conn.getVersion()
                if op(hypervisor_version,
                      versionutils.convert_version_to_int(hv_ver)):
                    return False
            if hv_type is not None:
                hypervisor_type = conn.getType()
                if hypervisor_type != hv_type:
                    return False
            return True
        except Exception:
            # Deliberately broad: any failure talking to libvirt is
            # treated as "version requirement not met".
            return False
    def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
        # op=operator.lt: the check fails when the actual version is
        # strictly lower than the requested minimum.
        return self._version_check(
            lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)
    def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
        # op=operator.ne: the check fails unless the actual version is
        # exactly the requested one.
        return self._version_check(
            lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)
# TODO(sahid): needs to be private
def get_domain(self, instance):
"""Retrieve libvirt domain object for an instance.
:param instance: an nova.objects.Instance object
Attempt to lookup the libvirt domain objects
corresponding to the Nova instance, based on
its name. If not found it will raise an
exception.InstanceNotFound exception. On other
errors, it will raise an exception.NovaException
exception.
:returns: a libvirt.Domain object
"""
return self._get_domain_by_name(instance.name)
def get_guest(self, instance):
"""Retrieve libvirt domain object for an instance.
:param instance: an nova.objects.Instance object
:returns: a nova.virt.libvirt.Guest object
"""
return libvirt_guest.Guest(
self.get_domain(instance))
def _get_domain_by_id(self, instance_id):
"""Retrieve libvirt domain object given an instance id.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
conn = self.get_connection()
return conn.lookupByID(instance_id)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_id)
msg = (_("Error from libvirt while looking up %(instance_id)s: "
"[Error Code %(error_code)s] %(ex)s")
% {'instance_id': instance_id,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _get_domain_by_name(self, instance_name):
"""Retrieve libvirt domain object given an instance name.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
"""
try:
conn = self.get_connection()
return conn.lookupByName(instance_name)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance_name)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance_name,
'error_code': error_code,
'ex': ex})
raise exception.NovaException(msg)
def _list_instance_domains_fast(self, only_running=True):
# The modern (>= 0.9.13) fast way - 1 single API call for all domains
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
return self.get_connection().listAllDomains(flags)
def _list_instance_domains_slow(self, only_running=True):
# The legacy (< 0.9.13) slow way - O(n) API call for n domains
uuids = []
doms = []
# Redundant numOfDomains check is for libvirt bz #836647
if self.get_connection().numOfDomains() > 0:
for id in self.get_connection().listDomainsID():
try:
dom = self._get_domain_by_id(id)
doms.append(dom)
uuids.append(dom.UUIDString())
except exception.InstanceNotFound:
continue
if only_running:
return doms
for name in self.get_connection().listDefinedDomains():
try:
dom = self._get_domain_by_name(name)
if dom.UUIDString() not in uuids:
doms.append(dom)
except exception.InstanceNotFound:
continue
return doms
def list_guests(self, only_running=True, only_guests=True):
"""Get a list of Guest objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
See method "list_instance_domains" for more information.
:returns: list of Guest objects
"""
return [libvirt_guest.Guest(dom) for dom in self.list_instance_domains(
only_running=only_running, only_guests=only_guests)]
    def list_instance_domains(self, only_running=True, only_guests=True):
        """Get a list of libvirt.Domain objects for nova instances

        :param only_running: True to only return running instances
        :param only_guests: True to filter out any host domain (eg Dom-0)

        Query libvirt to a get a list of all libvirt.Domain objects
        that correspond to nova instances. If the only_running parameter
        is true this list will only include active domains, otherwise
        inactive domains will be included too. If the only_guests parameter
        is true the list will have any "host" domain (aka Xen Domain-0)
        filtered out.

        :returns: list of libvirt.Domain objects
        """
        if not self._skip_list_all_domains:
            try:
                alldoms = self._list_instance_domains_fast(only_running)
            except (libvirt.libvirtError, AttributeError) as ex:
                # Bulk API missing or failing: remember that and fall
                # back to the slow path from now on.
                LOG.info(_LI("Unable to use bulk domain list APIs, "
                             "falling back to slow code path: %(ex)s"),
                         {'ex': ex})
                self._skip_list_all_domains = True
        if self._skip_list_all_domains:
            # Old libvirt, or a libvirt driver which doesn't
            # implement the new API
            alldoms = self._list_instance_domains_slow(only_running)
        doms = []
        for dom in alldoms:
            # Domain ID 0 is the host domain itself (e.g. Xen Dom-0).
            if only_guests and dom.ID() == 0:
                continue
            doms.append(dom)
        return doms
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
Method is only used by NUMA code paths which check on
libvirt version >= 1.0.4. getCPUMap() was introduced in
libvirt 1.0.0.
:returns: set of online CPUs, raises libvirtError on error
"""
(cpus, cpu_map, online) = self.get_connection().getCPUMap()
online_cpus = set()
for cpu in range(cpus):
if cpu_map[cpu]:
online_cpus.add(cpu)
return online_cpus
    def get_capabilities(self):
        """Returns the host capabilities information

        Returns an instance of config.LibvirtConfigCaps representing
        the capabilities of the host.

        Note: The result is cached in the member attribute _caps.

        :returns: a config.LibvirtConfigCaps object
        """
        if not self._caps:
            xmlstr = self.get_connection().getCapabilities()
            LOG.info(_LI("Libvirt host capabilities %s"), xmlstr)
            self._caps = vconfig.LibvirtConfigCaps()
            self._caps.parse_str(xmlstr)
            # NOTE(mriedem): Don't attempt to get baseline CPU features
            # if libvirt can't determine the host cpu model.
            if (hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES')
                and self._caps.host.cpu.model is not None):
                try:
                    # Expand the host CPU model into its full feature
                    # list so callers can inspect individual flags.
                    features = self.get_connection().baselineCPU(
                        [self._caps.host.cpu.to_xml()],
                        libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
                    # FIXME(wangpan): the return value of baselineCPU should be
                    #                 None or xml string, but libvirt has a bug
                    #                 of it from 1.1.2 which is fixed in 1.2.0,
                    #                 this -1 checking should be removed later.
                    if features and features != -1:
                        cpu = vconfig.LibvirtConfigCPU()
                        cpu.parse_str(features)
                        self._caps.host.cpu.features = cpu.features
                except libvirt.libvirtError as ex:
                    error_code = ex.get_error_code()
                    if error_code == libvirt.VIR_ERR_NO_SUPPORT:
                        # Best effort: not every driver supports this.
                        LOG.warn(_LW("URI %(uri)s does not support full set"
                                     " of host capabilities: %(error)s"),
                                 {'uri': self._uri, 'error': ex})
                    else:
                        raise
        return self._caps
def get_driver_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self.get_connection().getType()
def get_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
return self.get_connection().getVersion()
def get_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self.get_connection().getHostname()
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
LOG.error(_LE('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.'),
{'old': self._hostname,
'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
"""Find a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
if usage_type == 'iscsi':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
elif usage_type in ('rbd', 'ceph'):
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
elif usage_type == 'volume':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
else:
msg = _("Invalid usage_type: %s")
raise exception.NovaException(msg % usage_type)
try:
conn = self.get_connection()
return conn.secretLookupByUsage(usage_type_const, usage_id)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
return None
def create_secret(self, usage_type, usage_id, password=None):
"""Create a secret.
:param usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
'rbd' will be converted to 'ceph'.
:param usage_id: name of resource in secret
:param password: optional secret value to set
"""
secret_conf = vconfig.LibvirtConfigSecret()
secret_conf.ephemeral = False
secret_conf.private = False
secret_conf.usage_id = usage_id
if usage_type in ('rbd', 'ceph'):
secret_conf.usage_type = 'ceph'
elif usage_type == 'iscsi':
secret_conf.usage_type = 'iscsi'
elif usage_type == 'volume':
secret_conf.usage_type = 'volume'
else:
msg = _("Invalid usage_type: %s")
raise exception.NovaException(msg % usage_type)
xml = secret_conf.to_xml()
try:
LOG.debug('Secret XML: %s' % xml)
conn = self.get_connection()
secret = conn.secretDefineXML(xml)
if password is not None:
secret.setValue(password)
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Error defining a secret with XML: %s'), xml)
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
usage_id: name of resource in secret
"""
secret = self.find_secret(usage_type, usage_id)
if secret is not None:
secret.undefine()
def _get_hardware_info(self):
"""Returns hardware information about the Node.
Note that the memory size is reported in MiB instead of KiB.
"""
return self.get_connection().getInfo()
def get_cpu_count(self):
"""Returns the total numbers of cpu in the host."""
return self._get_hardware_info()[2]
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
return self._get_hardware_info()[1]
    def get_memory_mb_used(self):
        """Get the used memory size(MB) of physical computer.

        :returns: the total usage of memory(MB).
        """
        # NOTE(review): Python 2 era platform strings ('linux2'/'linux3');
        # on anything else /proc/meminfo is unavailable so report 0.
        if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
            return 0
        with open('/proc/meminfo') as fp:
            m = fp.read().split()
            # Each label token is followed by its value (in kB).
            idx1 = m.index('MemFree:')
            idx2 = m.index('Buffers:')
            idx3 = m.index('Cached:')
        if CONF.libvirt.virt_type == 'xen':
            # On Xen, sum the memory of all domains; dom0 needs special
            # handling (see below).
            used = 0
            for guest in self.list_guests(only_guests=False):
                try:
                    # TODO(sahid): Use get_info...
                    dom_mem = int(guest._get_domain_info(self)[2])
                except libvirt.libvirtError as e:
                    LOG.warn(_LW("couldn't obtain the memory from domain:"
                                 " %(uuid)s, exception: %(ex)s"),
                             {"uuid": guest.uuid, "ex": e})
                    continue
                # skip dom0
                if guest.id != 0:
                    used += dom_mem
                else:
                    # dom0's reported memory includes the host's
                    # free/buffered/cached memory; subtract those to
                    # estimate what is really in use.
                    used += (dom_mem -
                             (int(m[idx1 + 1]) +
                              int(m[idx2 + 1]) +
                              int(m[idx3 + 1])))
            # Convert it to MB
            return used // units.Ki
        else:
            # Non-Xen: used = total - (free + buffers + cached).
            avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
            # Convert it to MB
            return self.get_memory_mb_total() - avail // units.Ki
def get_cpu_stats(self):
"""Returns the current CPU state of the host with frequency."""
stats = self.get_connection().getCPUStats(
libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
# getInfo() returns various information about the host node
# No. 3 is the expected CPU frequency.
stats["frequency"] = self._get_hardware_info()[3]
return stats
def write_instance_config(self, xml):
"""Defines a domain, but does not start it.
:param xml: XML domain definition of the guest.
:returns: a virDomain instance
"""
return self.get_connection().defineXML(xml)
def device_lookup_by_name(self, name):
"""Lookup a node device by its name.
:returns: a virNodeDevice instance
"""
return self.get_connection().nodeDeviceLookupByName(name)
def list_pci_devices(self, flags=0):
"""Lookup pci devices.
:returns: a list of virNodeDevice instance
"""
return self.get_connection().listDevices("pci", flags)
def compare_cpu(self, xmlDesc, flags=0):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
# mount options and split options
bits = line.split()[3].split(",")
if "cpu" in bits:
return True
return False
except IOError:
return False
|
{
"content_hash": "89d5f18130d4518cb26aa2664896aefa",
"timestamp": "",
"source": "github",
"line_count": 997,
"max_line_length": 79,
"avg_line_length": 38,
"alnum_prop": 0.575278466979887,
"repo_name": "NeCTAR-RC/nova",
"id": "de22a6d00f89c9198ecacedc34babfdc99ad7e59",
"size": "38813",
"binary": false,
"copies": "6",
"ref": "refs/heads/nectar/mitaka",
"path": "nova/virt/libvirt/host.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17420185"
},
{
"name": "Shell",
"bytes": "36658"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
}
|
import os
import sys
import time
import json
import argparse
from st2client import models
from st2client.client import Client
# Terminal st2 execution statuses: polling stops on either of these.
END_STATES = ['succeeded', 'failed']
ST2HOST = 'localhost'
# CLI: --name keys the JSON result, --action is the st2 action ref,
# --params is a space separated "key=value" string, --token authenticates.
parser = argparse.ArgumentParser()
parser.add_argument('--name', action="store", dest="name", required=True)
parser.add_argument('--action', action="store", dest="action", required=True)
parser.add_argument('--params', action="store", dest="params")
parser.add_argument('--token', action="store", dest="token")
args = parser.parse_args()
runner = None
# st2client's Client() reads the auth token from the environment.
os.environ['ST2_AUTH_TOKEN'] = args.token
def runAction(action_ref, params):
    """Kick off an st2 action execution and block until it finishes.

    :param action_ref: the st2 action reference to execute
    :param params: raw parameter string, parsed by param_parser
    :returns: the finished LiveAction object
    """
    client = Client()
    executions = client.managers['LiveAction']
    request = models.LiveAction()
    request.action = action_ref
    request.parameters = param_parser(params)
    execution = executions.create(request)
    # Poll every 2 seconds until the execution reaches a terminal state.
    while execution.status not in END_STATES:
        time.sleep(2)
        execution = executions.get_by_id(execution.id)
    return execution
def param_parser(params):
    """Parse a space separated "key=value" string into a parameter dict.

    A value containing commas is split into a list (empty items are
    dropped).  A token without '=' is stored as the value of the 'cmd'
    parameter (later such tokens overwrite earlier ones).

    :param params: raw string such as "pkg=vim hosts=a,b deploy", or None
    :returns: dict mapping parameter name to a string or list of strings
    """
    parameters = {}
    if params is not None:
        for token in params.split(' '):
            if '=' in token:
                key, value = token.split('=', 1)
                if ',' in value:
                    # list() keeps behaviour identical on Python 2 and
                    # makes it correct on Python 3, where a bare
                    # filter() is a lazy iterator, not a list.
                    value = list(filter(None, value.split(',')))
            else:
                key = 'cmd'
                value = token
            parameters[key] = value
    return parameters
# Run the requested action and emit its result as JSON keyed by --name.
actionexec = runAction(action_ref=args.action, params=args.params)
output = {args.name: actionexec.result}
print json.dumps(output)
# Non-zero exit tells the caller the delegated action failed.
if actionexec.status != 'succeeded':
    sys.exit(2)
|
{
"content_hash": "568d68a5ef2d669af95fec569166181b",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 77,
"avg_line_length": 26.016129032258064,
"alnum_prop": 0.6354618722876627,
"repo_name": "StackStorm/st2cd",
"id": "b2fd76a6fa48094dec53e5fb9f8ab290b63ba36d",
"size": "1639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actions/action_run.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9154"
},
{
"name": "Shell",
"bytes": "86229"
}
],
"symlink_target": ""
}
|
from website import settings
from website.project import Node
from modularodm.query.querydialect import DefaultQueryDialect as Q
from framework.analytics.piwik import PiwikClient
def activity():
    """Gather data for the public activity/discovery page.

    Combines Piwik analytics (the most visited public projects and
    registrations, when a Piwik host is configured) with the ten most
    recently created public projects and registrations.

    :returns: dict with keys recent_public_projects,
        recent_public_registrations, popular_public_projects,
        popular_public_registrations and hits (per-node view counts).
    """
    popular_public_projects = []
    popular_public_registrations = []
    hits = {}
    # Analytics are optional: only queried when Piwik is configured.
    if settings.PIWIK_HOST:
        client = PiwikClient(
            url=settings.PIWIK_HOST,
            auth_token=settings.PIWIK_ADMIN_TOKEN,
            site_id=settings.PIWIK_SITE_ID,
            period='week',
            date='today',
        )
        # The 'Project ID' custom variable tracks per-node traffic.
        popular_project_ids = [
            x for x in client.custom_variables if x.label == 'Project ID'
        ][0].values
        for nid in popular_project_ids:
            node = Node.load(nid.value)
            if node is None:
                # Tracked id no longer resolves to a node; skip it.
                continue
            if node.is_public and not node.is_registration:
                if len(popular_public_projects) < 10:
                    popular_public_projects.append(node)
            elif node.is_public and node.is_registration:
                if len(popular_public_registrations) < 10:
                    popular_public_registrations.append(node)
            # Stop early once both top-10 lists are full.
            if len(popular_public_projects) >= 10 and len(popular_public_registrations) >= 10:
                break
        hits = {
            x.value: {
                'hits': x.actions,
                'visits': x.visits
            } for x in popular_project_ids
        }
    # Projects
    recent_query = (
        Q('category', 'eq', 'project') &
        Q('is_public', 'eq', True) &
        Q('is_deleted', 'eq', False)
    )
    # Temporary bug fix: Skip projects with empty contributor lists
    # Todo: Fix underlying bug and remove this selector
    recent_query = recent_query & Q('contributors', 'ne', [])
    recent_public_projects = Node.find(
        recent_query &
        Q('is_registration', 'eq', False)
    ).sort(
        '-date_created'
    ).limit(10)
    # Registrations
    recent_public_registrations = Node.find(
        recent_query &
        Q('is_registration', 'eq', True)
    ).sort(
        '-registered_date'
    ).limit(10)
    return {
        'recent_public_projects': recent_public_projects,
        'recent_public_registrations': recent_public_registrations,
        'popular_public_projects': popular_public_projects,
        'popular_public_registrations': popular_public_registrations,
        'hits': hits,
    }
|
{
"content_hash": "12ed2e818f0856c3319c5be0f402ee0d",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 94,
"avg_line_length": 30.59493670886076,
"alnum_prop": 0.5771617707902358,
"repo_name": "AndrewSallans/osf.io",
"id": "c4c75afa4e1f879cad514608e26d778e928bb5c9",
"size": "2417",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/discovery/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "70439"
},
{
"name": "JavaScript",
"bytes": "2555546"
},
{
"name": "Python",
"bytes": "2159449"
}
],
"symlink_target": ""
}
|
"""
main.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
$Id$
created by wesc on 2014 may 24
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from conference import ConferenceApi
class SetAnnouncementHandler(webapp2.RequestHandler):
    """Cron endpoint: refresh the cached conference announcement."""
    def get(self):
        """Set Announcement in Memcache."""
        ConferenceApi._cacheAnnouncement()
        # 204: handled successfully, no response body.
        self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
    """Task queue endpoint: email the creator of a new conference."""
    def post(self):
        """Send email confirming Conference creation."""
        mail.send_mail(
            'noreply@%s.appspotmail.com' % (
                app_identity.get_application_id()),     # from
            self.request.get('email'),                  # to
            'You created a new Conference!',            # subj
            'Hi, you have created a following '         # body
            'conference:\r\n\r\n%s' % self.request.get(
                'conferenceInfo')
        )
class FindFeaturedSpeakerHandler(webapp2.RequestHandler):
    """Task queue endpoint: cache a speaker as "featured" when they have
    two or more sessions in the same conference.
    """
    def post(self):
        """Find speakers with more than one session at a conference"""
        websafe_speaker_key = self.request.get('websafe_speaker_key')
        wsck = self.request.get('wsck')
        conf_sessions = ConferenceApi._getConferenceSessions(wsck)
        if conf_sessions:
            filtered_sessions, speaker_name = (
                (ConferenceApi._filterSessionsBySpeaker(
                    conf_sessions, websafe_speaker_key)))
            number_of_sessions = filtered_sessions.count()
            # Two or more sessions by the same speaker qualifies them
            # as the conference's featured speaker.
            if number_of_sessions >= 2:
                session_name = self.request.get('session_name')
                ConferenceApi._cacheFeaturedSpeaker(session_name, speaker_name)
# Route the cron/task queue URLs (configured in cron.yaml / queue.yaml)
# to their handlers.
app = webapp2.WSGIApplication([
    ('/crons/set_announcement', SetAnnouncementHandler),
    ('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
    ('/tasks/find_featured_speaker', FindFeaturedSpeakerHandler),
], debug=True)
|
{
"content_hash": "556759382dcfe3d1d88bc42e3e2a8906",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 79,
"avg_line_length": 35.266666666666666,
"alnum_prop": 0.6427221172022685,
"repo_name": "poAndroid/p4_conference",
"id": "3f2faf62f4eaaa306388e61b6f884adca6757a6f",
"size": "2139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "23913"
},
{
"name": "JavaScript",
"bytes": "32836"
},
{
"name": "Python",
"bytes": "52837"
}
],
"symlink_target": ""
}
|
import collections
from hopper.utils.logger import *
from BaseField import *
from hopper.utils.Collections import *
# Marked options (e.g. './app -t' or './app --test')
class Option(BaseField):
	"""A named command line option with short ('-t') and/or long
	('--test') forms, optionally carrying a value via an operator
	(e.g. '--key=value').

	Parsing entry point is match(); __matchesOption__ does the work
	and returns a 4-tuple:
	    (name matched, value valid, option name, (operator, value))
	"""
	def __init__(self, shortname, longname, description = None,
			multiple = False, required = False, default = None,
			validoperators = None):
		BaseField.__init__(self, description, multiple, required, default)
		self.shortnames = []
		self.longnames = []
		# Accept a single name or any iterable of names for each form.
		self.shortnames = getAsListOrIterable(shortname)
		self.longnames = getAsListOrIterable(longname)
		# Operators (e.g. '=', '+=') that may separate name and value;
		# when None, '=' is assumed.
		self.validoperators = validoperators

	def __matchesName__(self, name, longarg = False):
		# True when *name* equals one of this option's short or long names.
		for n in (self.longnames if longarg else self.shortnames):
			if name == n:
				return True
		return False

	def __matchesOption__(self, instance, arg, valueRequirement = False, valueAllowed = False):
		# Parse a raw token. Returns the 4-tuple described on the class;
		# (False, False, None, None) also covers name-matched-but-invalid
		# value cases, matching the original behaviour.
		if arg[0] == '-' or arg[0:2] == '--':
			matched = False
			longarg = True if (arg[0:2] == '--') else False
			# Seperate the '-' from the value of the arg
			if longarg:
				valuearg = arg[2:]
			else:
				valuearg = arg[1:]
			# Split the arg into fields
			fields = None
			operator = "="
			if self.validoperators:
				# First configured operator found in the token wins.
				for i in self.validoperators:
					if i in valuearg:
						operator = i
						break
			# split at operators
			fields = valuearg.split(operator, 1)
			if fields:
				# Check if the name matches this option
				nameMatches = self.__matchesName__(fields[0], longarg)
				if nameMatches:
					if len(fields) > 1 and not(valueAllowed):
						warning("Argument '%s' has value, but value is not expected." % arg)
						return (False, False, None, None)
					elif len(fields) == 1 and valueRequirement:
						warning("Argument '%s' is missing value, but value is expected." % arg)
						return (False, False, None, None)
					# Check multiple requirements
					if (not(instance.empty()) and self.multiple) or instance.empty():
						if len(fields) > 1:
							# Strip surrounding quotes from the value.
							strippedvalue = fields[1].strip("'\"")
							return (True, True, fields[0], (operator, strippedvalue))
						else:
							return (True, True, fields[0], None)
					elif (not(instance.empty()) and not(self.multiple)):
						warning("Invalid Argument: Only a single '%s' option can be provided" % arg)
						return (True, False, None, None)
		return (False, False, None, None)

	def match(self, instance, arg, position = None):
		# An option matches purely on its name; value handling happens
		# in __matchesOption__ with stricter flags elsewhere.
		matches = self.__matchesOption__(instance, arg, False, False)
		if matches[0]:
			return True
		return False

	def __repr__(self):
		reprString = "CommandOption:"
		names = []
		for shortname in self.shortnames:
			names.append("-%s" % shortname)
		for longname in self.longnames:
			names.append("--%s" % longname)
		if len(names) != 0:
			reprString += " names = [ %s ]" % ", ".join(names)
		options = []
		if self.multiple:
			options.append("multiple")
		if self.required:
			options.append("required")
		if len(options) != 0:
			reprString += " (%s)" % ", ".join(options)
		return reprString
|
{
"content_hash": "5951a09494ec9877fe5bc99a5fb3e808",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 92,
"avg_line_length": 29.08823529411765,
"alnum_prop": 0.6461071789686552,
"repo_name": "Xilinx/hopper",
"id": "32cd21d02a807e41374508b802e2c68dbc992cfa",
"size": "4058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hopper/utils/args/Option.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "150323"
}
],
"symlink_target": ""
}
|
import string

# Input string for the +2 shift cypher below (python challenge #1);
# presumably shifting it yields the next challenge URL - see trans().
solve = "pythonchallenge.com/pc/def/map.html"
def shift_cypher(value, orig):
    """Caesar-shift every ASCII letter in *orig* forward by *value*.

    Non-letter characters pass through unchanged.  Wrapping is handled
    per case ('z' -> 'a', 'Z' -> 'A'); the original implementation only
    wrapped lowercase correctly and produced punctuation for uppercase
    letters near 'Z'.

    :param value: positions to shift; reduced modulo 26, so values over
        26 or negative values also work
    :param orig: the string to encode
    :returns: the shifted string
    """
    finish = ""
    for ch in orig:
        # Restrict to ASCII ranges so the modular wrap is well defined.
        if 'a' <= ch <= 'z':
            ch = chr(ord('a') + (ord(ch) - ord('a') + value) % 26)
        elif 'A' <= ch <= 'Z':
            ch = chr(ord('A') + (ord(ch) - ord('A') + value) % 26)
        finish += ch
    return finish
def trans():
    # Same +2 shift implemented with a translation table.
    # Python 2 only: uses string.maketrans and the print statement.
    shift = string.maketrans("abcdefghijklmnopqrstuvwxyz", "cdefghijklmnopqrstuvwxyzab")
    print solve.translate(shift)
# print shift_cypher(2, solve)
# trans()
|
{
"content_hash": "b3e66e676fa755d7bf48d784fdea8e5c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 85,
"avg_line_length": 19.8,
"alnum_prop": 0.6481481481481481,
"repo_name": "yarabarla/python-challenge",
"id": "531504bfe53e0de67d57be5668d904752b20036f",
"size": "644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2909"
}
],
"symlink_target": ""
}
|
import abc
import six
from nova import keymgr
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class VolumeEncryptor(object):
    """Base class to support encrypted volumes.

    A VolumeEncryptor provides hooks for attaching and detaching volumes,
    which are called immediately prior to attaching the volume to an
    instance and immediately following detaching the volume from an
    instance. This class performs no actions for either hook.
    """
    def __init__(self, connection_info, **kwargs):
        # Key manager used to resolve encryption_key_id to key material.
        self._key_manager = keymgr.API()
        # None when no 'encryption_key_id' kwarg was supplied.
        self.encryption_key_id = kwargs.get('encryption_key_id')
    def _get_key(self, context):
        """Retrieves the encryption key for the specified volume.

        :param context: the request context used for the key lookup
        """
        return self._key_manager.get_key(context, self.encryption_key_id)
    @abc.abstractmethod
    def attach_volume(self, context, **kwargs):
        """Hook called immediately prior to attaching a volume to an instance.
        """
        pass
    @abc.abstractmethod
    def detach_volume(self, **kwargs):
        """Hook called immediately after detaching a volume from an instance.
        """
        pass
|
{
"content_hash": "72d8b2a28304a2bc39e0ddbde7176d03",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 29.906976744186046,
"alnum_prop": 0.6835147744945568,
"repo_name": "sacharya/nova",
"id": "ac5331c8aa3b5733f3b0a983bea81d496bfe948e",
"size": "2007",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/volume/encryptors/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13505239"
},
{
"name": "Shell",
"bytes": "16239"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
from frappe import _
import frappe.defaults
from frappe.modules.import_file import get_file_path, read_doc_from_file
from frappe.translate import send_translations
from frappe.core.doctype.doctype.doctype import (clear_permissions_cache,
validate_permissions_for_doctype)
from frappe.permissions import (reset_perms, get_linked_doctypes, get_all_perms,
setup_custom_perms, add_permission, update_permission_property)
# System doctypes whose permissions must not be edited through the UI.
not_allowed_in_permission_manager = ["DocType", "Patch Log", "Module Def"]
@frappe.whitelist()
def get_roles_and_doctypes():
	"""Return the doctype and role option lists for the Permission
	Manager UI, restricted to active domains and sorted by label.
	"""
	frappe.only_for("System Manager")
	send_translations(frappe.get_lang_dict("doctype", "DocPerm"))
	active_domains = frappe.get_active_domains()
	# Doctypes: skip child tables and the blacklisted system doctypes;
	# keep those with no domain restriction or an active domain.
	doctypes = frappe.get_all("DocType", filters={
		"istable": 0,
		"name": ("not in", ",".join(not_allowed_in_permission_manager)),
	}, or_filters={
		"ifnull(restrict_to_domain, '')": "",
		"restrict_to_domain": ("in", active_domains)
	}, fields=["name"])
	# Roles: everything except Administrator and disabled roles.
	roles = frappe.get_all("Role", filters={
		"name": ("not in", "Administrator"),
		"disabled": 0,
	}, or_filters={
		"ifnull(restrict_to_domain, '')": "",
		"restrict_to_domain": ("in", active_domains)
	}, fields=["name"])
	doctypes_list = [ {"label":_(d.get("name")), "value":d.get("name")} for d in doctypes]
	roles_list = [ {"label":_(d.get("name")), "value":d.get("name")} for d in roles]
	return {
		"doctypes": sorted(doctypes_list, key=lambda d: d['label']),
		"roles": sorted(roles_list, key=lambda d: d['label'])
	}
@frappe.whitelist()
def get_permissions(doctype=None, role=None):
	"""Return permission rules filtered by doctype and/or role.

	Prefers Custom DocPerm rows; falls back to standard DocPerm rows
	when no customisation exists.  Each row is annotated with the
	doctypes linked from its parent and whether the parent is
	submittable.
	"""
	frappe.only_for("System Manager")
	if role:
		out = get_all_perms(role)
		if doctype:
			out = [p for p in out if p.parent == doctype]
	else:
		out = frappe.get_all('Custom DocPerm', fields='*', filters=dict(parent = doctype), order_by="permlevel")
		if not out:
			out = frappe.get_all('DocPerm', fields='*', filters=dict(parent = doctype), order_by="permlevel")
	# Cache linked doctypes per parent to avoid repeated lookups.
	linked_doctypes = {}
	for d in out:
		if not d.parent in linked_doctypes:
			linked_doctypes[d.parent] = get_linked_doctypes(d.parent)
		d.linked_doctypes = linked_doctypes[d.parent]
		meta = frappe.get_meta(d.parent)
		if meta:
			d.is_submittable = meta.is_submittable
	return out
@frappe.whitelist()
def add(parent, role, permlevel):
	"""Create a new permission rule for *role* on doctype *parent*."""
	frappe.only_for("System Manager")
	add_permission(parent, role, permlevel)
@frappe.whitelist()
def update(doctype, role, permlevel, ptype, value=None):
	"""Set one permission property; return 'refresh' when the client
	should reload the permission table.
	"""
	frappe.only_for("System Manager")
	changed = update_permission_property(doctype, role, permlevel, ptype, value)
	return 'refresh' if changed else None
@frappe.whitelist()
def remove(doctype, role, permlevel):
	"""Delete one Custom DocPerm rule; refuse to remove the last rule."""
	frappe.only_for("System Manager")
	# Materialise standard perms as Custom DocPerm first, so deleting one
	# rule does not silently fall back to the full standard set.
	setup_custom_perms(doctype)
	name = frappe.get_value('Custom DocPerm', dict(parent=doctype, role=role, permlevel=permlevel))
	frappe.db.sql('delete from `tabCustom DocPerm` where name=%s', name)
	if not frappe.get_all('Custom DocPerm', dict(parent=doctype)):
		frappe.throw(_('There must be atleast one permission rule.'), title=_('Cannot Remove'))
	validate_permissions_for_doctype(doctype, for_remove=True)
@frappe.whitelist()
def reset(doctype):
	"""Drop all custom rules for *doctype*, reverting to the standard
	permissions, and clear the permission cache.
	"""
	frappe.only_for("System Manager")
	reset_perms(doctype)
	clear_permissions_cache(doctype)
@frappe.whitelist()
def get_users_with_role(role):
	"""Return names of enabled users (except Administrator) holding *role*."""
	frappe.only_for("System Manager")
	return [p[0] for p in frappe.db.sql("""select distinct tabUser.name
		from `tabHas Role`, tabUser where
			`tabHas Role`.role=%s
			and tabUser.name != "Administrator"
			and `tabHas Role`.parent = tabUser.name
			and tabUser.enabled=1""", role)]
@frappe.whitelist()
def get_standard_permissions(doctype):
	"""Return the standard (non-custom) DocPerm rows for *doctype*."""
	frappe.only_for("System Manager")
	meta_doc = frappe.get_doc('DocType', doctype)
	return [perm.as_dict() for perm in meta_doc.permissions]
|
{
"content_hash": "ddadb557f3205fbd30d4ccc495abc6ea",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 106,
"avg_line_length": 33.5625,
"alnum_prop": 0.7081670657089651,
"repo_name": "paurosello/frappe",
"id": "cc5a2998499dc5551c53e690ea74b1b574a6ab76",
"size": "3860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frappe/core/page/permission_manager/permission_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "422931"
},
{
"name": "HTML",
"bytes": "202357"
},
{
"name": "JavaScript",
"bytes": "1858011"
},
{
"name": "Makefile",
"bytes": "29"
},
{
"name": "Python",
"bytes": "2042290"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
from ingenico.connect.sdk.data_object import DataObject
class Level3SummaryData(DataObject):
    """
    Deprecated; Use ShoppingCart.amountBreakdown instead
    """

    __discount_amount = None
    __duty_amount = None
    __shipping_amount = None

    @property
    def discount_amount(self):
        """
        | Discount on the entire transaction, with the last 2 digits are implied decimal places

        Type: int

        Deprecated; Use ShoppingCart.amountBreakdown with type DISCOUNT instead
        """
        return self.__discount_amount

    @discount_amount.setter
    def discount_amount(self, value):
        self.__discount_amount = value

    @property
    def duty_amount(self):
        """
        | Duty on the entire transaction, with the last 2 digits are implied decimal places

        Type: int

        Deprecated; Use ShoppingCart.amountBreakdown with type DUTY instead
        """
        return self.__duty_amount

    @duty_amount.setter
    def duty_amount(self, value):
        self.__duty_amount = value

    @property
    def shipping_amount(self):
        """
        | Shippingcost on the entire transaction, with the last 2 digits are implied decimal places

        Type: int

        Deprecated; Use ShoppingCart.amountBreakdown with type SHIPPING instead
        """
        return self.__shipping_amount

    @shipping_amount.setter
    def shipping_amount(self, value):
        self.__shipping_amount = value

    def to_dictionary(self):
        """Serialize the set (non-None) amounts on top of the base dictionary."""
        dictionary = super(Level3SummaryData, self).to_dictionary()
        for key, value in (('discountAmount', self.discount_amount),
                           ('dutyAmount', self.duty_amount),
                           ('shippingAmount', self.shipping_amount)):
            if value is not None:
                dictionary[key] = value
        return dictionary

    def from_dictionary(self, dictionary):
        """Populate this object from *dictionary*; absent keys leave defaults."""
        super(Level3SummaryData, self).from_dictionary(dictionary)
        for key, attribute in (('discountAmount', 'discount_amount'),
                               ('dutyAmount', 'duty_amount'),
                               ('shippingAmount', 'shipping_amount')):
            if key in dictionary:
                setattr(self, attribute, dictionary[key])
        return self
|
{
"content_hash": "f95378937ad3329a0d2bcc31bd5b5ed5",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 99,
"avg_line_length": 31.38157894736842,
"alnum_prop": 0.6352201257861635,
"repo_name": "Ingenico-ePayments/connect-sdk-python3",
"id": "93363c3b90e82fd68ac267a30d41214741e68bb3",
"size": "2536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ingenico/connect/sdk/domain/payment/definitions/level3_summary_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36"
},
{
"name": "Python",
"bytes": "1735057"
}
],
"symlink_target": ""
}
|
from panda3d.core import LVector3f
import DNAGroup
import DNABattleCell
import DNAUtil
class DNAVisGroup(DNAGroup.DNAGroup):
    """DNA group describing a visibility zone: which zone names are visible
    from it, the suit path edges it contains and its battle cells."""
    COMPONENT_CODE = 2

    def __init__(self, name):
        DNAGroup.DNAGroup.__init__(self, name)
        self.visibles = []  # zone name strings (see makeFromDGI)
        self.suitEdges = []  # suit edges from dnaStorage.getSuitEdge
        self.battleCells = []  # DNABattleCell instances

    def getVisGroup(self):
        """A vis group is its own vis group."""
        return self

    def addBattleCell(self, battleCell):
        self.battleCells.append(battleCell)

    def addSuitEdge(self, suitEdge):
        self.suitEdges.append(suitEdge)

    def addVisible(self, visible):
        self.visibles.append(visible)

    def getBattleCell(self, i):
        return self.battleCells[i]

    def getNumBattleCells(self):
        return len(self.battleCells)

    def getNumSuitEdges(self):
        return len(self.suitEdges)

    def getNumVisibles(self):
        return len(self.visibles)

    def getSuitEdge(self, i):
        return self.suitEdges[i]

    def getVisibleName(self, i):
        return self.visibles[i]

    def removeBattleCell(self, cell):
        self.battleCells.remove(cell)

    def removeSuitEdge(self, edge):
        self.suitEdges.remove(edge)

    def removeVisible(self, visible):
        self.visibles.remove(visible)

    def makeFromDGI(self, dgi, dnaStorage):
        """Deserialize this group from a datagram iterator.

        Read order (edges, visibles, cells) must mirror the serializer;
        do not reorder the dgi.get* calls.
        """
        DNAGroup.DNAGroup.makeFromDGI(self, dgi)
        numEdges = dgi.getUint16()
        for _ in xrange(numEdges):
            index = dgi.getUint16()
            endPoint = dgi.getUint16()
            self.addSuitEdge(dnaStorage.getSuitEdge(index, endPoint))
        numVisibles = dgi.getUint16()
        for _ in xrange(numVisibles):
            self.addVisible(DNAUtil.dgiExtractString8(dgi))
        numCells = dgi.getUint16()
        for _ in xrange(numCells):
            w = dgi.getUint8()
            h = dgi.getUint8()
            # coordinates are serialized as int32 scaled by 100 — scale back
            x, y, z = [dgi.getInt32() / 100.0 for i in xrange(3)]
            self.addBattleCell(DNABattleCell.DNABattleCell(w, h, LVector3f(x, y, z)))
|
{
"content_hash": "9e4f5e13f93e4504478dca1a59a359d7",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 85,
"avg_line_length": 27.180555555555557,
"alnum_prop": 0.629535002554931,
"repo_name": "Spiderlover/Toontown",
"id": "4a8e5e9d15eaa5c1a2e94048ede4309dc24808a5",
"size": "1957",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "toontown/dna/DNAVisGroup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7774"
},
{
"name": "Python",
"bytes": "17241353"
},
{
"name": "Shell",
"bytes": "7699"
}
],
"symlink_target": ""
}
|
"""
Django settings for karaoke project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY", "")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ["slidesharekaraoke.com", ".slidesharekaraoke.com"]
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'main',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'karaoke.urls'
WSGI_APPLICATION = 'karaoke.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES['default'] = dj_database_url.config()
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['slidesharekaraoke.com']
# Static asset configuration
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'main/static/main'),
)
|
{
"content_hash": "a4e468ccc8b78779678a7331acc72638",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 71,
"avg_line_length": 25.28846153846154,
"alnum_prop": 0.7258555133079848,
"repo_name": "xoneco/slidesharekaraoke",
"id": "2807cc2f535c5344b65afd35c7cf31360e40298c",
"size": "2630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "karaoke/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "124916"
},
{
"name": "HTML",
"bytes": "25275"
},
{
"name": "JavaScript",
"bytes": "5355"
},
{
"name": "Python",
"bytes": "9676"
}
],
"symlink_target": ""
}
|
"""Tests for the ResourceCache class"""
import copy
import mock
import pytest
from swimlane.core.cache import ResourcesCache, check_cache
from swimlane.core.resources.base import APIResource
from swimlane.core.resources.record import Record
def test_len(mock_app, mock_record):
    """Test cache length includes all cached resources"""
    cache = ResourcesCache(10)
    assert not len(cache)

    cache.cache(mock_app)
    assert len(cache) == 1

    # Caching the same resource again must not grow the cache
    cache.cache(mock_app)
    assert len(cache) == 1

    cache.cache(mock_record)
    assert len(cache) == 2
def test_separate_resource_caches(mock_app, mock_record):
    """Test each resource class has a separate queue of max size instead of a single global queue"""
    # One slot per resource type, not one slot overall
    cache = ResourcesCache(1)
    cache.cache(mock_app)
    cache.cache(mock_record)
    assert len(cache) == 2

    # A second, distinct record should evict only within the Record queue
    second_record = copy.copy(mock_record)
    second_record.id = mock_record.id + '123'
    cache.cache(second_record)
    assert len(cache) == 2
def test_item_in_cache(mock_record):
    """Test checking if item exists in cache"""
    cache = ResourcesCache(5)
    # Membership (__contains__) is keyed on the resource itself
    assert mock_record not in cache
    cache.cache(mock_record)
    assert mock_record in cache
def test_get_item_from_cache(mock_record):
    """Test retrieving item from cache, and that item is a copy instead of reference to same instance"""
    cache = ResourcesCache(5)
    key = (type(mock_record), 'id', mock_record.id)

    # Lookup before the record is cached must fail
    with pytest.raises(KeyError):
        cache[key]

    cache.cache(mock_record)
    retrieved = cache[key]
    assert retrieved == mock_record
    assert retrieved is not mock_record

    # Lookup fails again once the cache is cleared
    cache.clear()
    with pytest.raises(KeyError):
        cache[key]
def test_clear(mock_app, mock_record):
    """Test clearing individual and all resources from cache"""
    cache = ResourcesCache(5)
    cache.cache(mock_app)
    cache.cache(mock_record)
    assert len(cache) == 2

    # Clearing one resource class leaves the other class's cache untouched
    cache.clear(type(mock_app))
    assert len(cache) == 1
    assert mock_app not in cache
    assert mock_record in cache

    cache.cache(mock_app)
    assert len(cache) == 2

    # Clearing with no argument empties every per-class cache
    cache.clear()
    assert not len(cache)
    assert mock_app not in cache
    assert mock_record not in cache
def test_check_cache_decorator(mock_swimlane, mock_record):
    """Check that decorator prevents actual function call on cache hits and defers to normal call on cache misses"""
    miss_sentinel = object()

    mock_func = mock.MagicMock(return_value=miss_sentinel)
    mock_func.__name__ = 'mock_func'
    decorated_func = check_cache(Record)(mock_func)

    mock_adapter = mock.MagicMock()
    mock_swimlane.resources_cache = ResourcesCache(5)
    mock_adapter._swimlane = mock_swimlane

    # Cache miss: the wrapped function runs with the original arguments
    assert decorated_func(mock_adapter, id=mock_record.id) is miss_sentinel
    mock_func.assert_called_once_with(mock_adapter, id=mock_record.id)

    # Cache hit: the wrapped function must not be invoked a second time
    mock_swimlane.resources_cache.cache(mock_record)
    assert decorated_func(mock_adapter, id=mock_record.id) == mock_record
    assert mock_func.call_count == 1
def test_unsupported_api_resource_instance(mock_swimlane):
    """Test that APIResource instances not returning all cache details are ignored but don't fail"""
    cache = ResourcesCache(5)
    # A bare APIResource lacks cache-key metadata; caching it is a no-op
    cache.cache(APIResource(mock_swimlane, {}))
    assert len(cache) == 0
# Non-APIResource values of assorted types must all be rejected
@pytest.mark.parametrize('item', [
    object(),
    None,
    {},
    [],
    'string',
    123
])
def test_cache_unsupported_type(item):
    """Test attempting to cache a non-APIResource instance fails"""
    cache = ResourcesCache(5)
    with pytest.raises(TypeError):
        cache.cache(item)
# A valid key is a 3-tuple (APIResource subclass, field name, value);
# everything below deviates in type, leading element, or length
@pytest.mark.parametrize('key', [
    object(),
    None,
    {},
    [],
    (),
    ('string', 'wrong_leading_type', 'value'),
    (object, 'wrong_leading_type', 'value'),
    (Record, 'wrong_length'),
    (Record, 'wrong_length', 'wrong_length', 'wrong_length'),
])
def test_cache_invalid_index_key(key):
    """Test exception raised when attempting lookup with invalid index key"""
    cache = ResourcesCache(5)
    with pytest.raises(TypeError):
        item = cache[key]
|
{
"content_hash": "bb05e0f1e4782db64192792c1ca492ce",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 116,
"avg_line_length": 25.70718232044199,
"alnum_prop": 0.6806361487212551,
"repo_name": "Swimlane/sw-python-client",
"id": "ae93cfda68b2bb30534cd60fd3be948c0f5a9b0d",
"size": "4653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "338"
},
{
"name": "Python",
"bytes": "50912"
}
],
"symlink_target": ""
}
|
from app import app
from flask.ext.compress import Compress

# Register compression BEFORE starting the server: app.run() blocks until
# shutdown, so the original post-run Compress(app) call never took effect.
# NOTE(review): ``flask.ext.*`` imports were removed in Flask 1.0; switch to
# ``from flask_compress import Compress`` when upgrading Flask.
Compress(app)
app.run(debug=False, port=8080)
|
{
"content_hash": "20b0af9eacf9c5324de7c73acc793432",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 39,
"avg_line_length": 21.4,
"alnum_prop": 0.794392523364486,
"repo_name": "starcalibre/MNIST3D",
"id": "5ac489c6bd760bbf0ae612b816319d9fdd5b29e6",
"size": "130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4787"
},
{
"name": "JavaScript",
"bytes": "29605"
},
{
"name": "Python",
"bytes": "2217"
}
],
"symlink_target": ""
}
|
import os
import re
# Public API of this module.
__all__ = [
    'branch_to_db',
    'branch_to_domain',
    'branch_to_slug',
    'branch_to_url',
    'get_active_branch_name',
]
def get_active_branch_name(path=None):
    """Return the checked-out branch name of the repo at *path* (default: cwd)."""
    from git import Repo
    repo = Repo(path or os.getcwd())
    return repo.active_branch.name
def branch_to_domain(branch_name, domain_pattern=None):
    """
    Convert a git branch name into a valid domain string.
    """
    if domain_pattern:
        # A (pattern, replacement) pair performs a substitution; a bare
        # pattern extracts its first capture group instead.
        if isinstance(domain_pattern, (tuple, list)):
            pattern, replacer = domain_pattern
        else:
            pattern, replacer = domain_pattern, None
        match = re.search(pattern, branch_name, flags=re.I)
        if match:
            if not replacer:
                return match.group(1)
            return re.sub(pattern, replacer, branch_name, flags=re.I)
    # Fallback: lowercase, turn every disallowed character into a hyphen,
    # then collapse runs of hyphens
    sanitized = re.sub(r'[^a-z0-9\-]', '-', branch_name.lower())
    return re.sub(r'-{2,}', '-', sanitized)
def branch_to_url(base_domain, branch_name, domain_pattern=None):
    """Build the host name for *branch_name*; master maps to the bare base domain."""
    domain = branch_to_domain(branch_name, domain_pattern)
    return base_domain if domain == 'master' else f'{domain}.{base_domain}'
def branch_to_slug(branch_name, **kwargs):
    """
    Convert branch name to valid slug
    """
    # Strip a single leading "<prefix>/" (e.g. "feature/") before converting
    short_name = branch_name.split('/', 1)[-1]
    return branch_to_domain(short_name, **kwargs)
def branch_to_db(branch_name, **kwargs):
    """
    Convert branch name to valid database name
    """
    # Hyphens are not valid in database names, so drop them entirely
    domain = branch_to_domain(branch_name, **kwargs)
    return domain.replace('-', '')
|
{
"content_hash": "90e511d9bb4c4e7269ebfc04654bc665",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 73,
"avg_line_length": 27.616666666666667,
"alnum_prop": 0.6155703077851539,
"repo_name": "MyBook/fabric-utils",
"id": "51edd8d271d7d20a0e853689016224bfb223e01e",
"size": "1657",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fabric_utils/git.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20319"
}
],
"symlink_target": ""
}
|
from model.contact import Contact
import re
class ContactHelper:
    """Page-object helper wrapping every contact operation of the web app.

    ``self.app.wd`` is the underlying selenium WebDriver; ``contact_cache``
    memoizes the parsed contact list and is invalidated by any mutation.
    """

    def __init__(self, app):
        self.app = app

    def open_contact_page(self):
        """Navigate to the contact list (home) page unless it is already open."""
        wd = self.app.wd
        if not (wd.current_url.endswith("/index.php") and len(wd.find_elements_by_name("searchstring")) > 0):
            wd.find_element_by_link_text("home").click()

    def create(self, contact):
        """Create a new contact from *contact* and invalidate the cache."""
        wd = self.app.wd
        # init contact creation
        wd.find_element_by_link_text("add new").click()
        self.fill_contact_form(contact)
        # submit contact creation
        wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
        self.contact_cache = None

    def fill_contact_form(self, contact):
        """Populate the contact form; fields whose value is None are skipped.

        Fixed: email, email2 and homepage were previously written twice.
        """
        wd = self.app.wd
        self.change_field_value("firstname", contact.firstname)
        self.change_field_value("middlename", contact.middlename)
        self.change_field_value("lastname", contact.lastname)
        self.change_field_value("nickname", contact.nickname)
        self.change_field_value("title", contact.title)
        self.change_field_value("company", contact.company)
        self.change_field_value("address", contact.address)
        self.change_field_value("home", contact.home_phone)
        self.change_field_value("mobile", contact.mobile_phone)
        self.change_field_value("work", contact.work_phone)
        self.change_field_value("fax", contact.fax)
        self.change_field_value("email", contact.email)
        self.change_field_value("email2", contact.email2)
        self.change_field_value("email3", contact.email3)
        self.change_field_value("homepage", contact.homepage)
        # birthday dropdowns: fixed day/month options (8th entry / 10th entry)
        if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[8]").is_selected():
            wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[8]").click()
        if not wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[10]").is_selected():
            wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[10]").click()
        self.change_field_value("byear", contact.birth_date)
        self.change_field_value("address2", contact.address2)
        self.change_field_value("phone2", contact.phone2)
        self.change_field_value("notes", contact.notes)

    def change_field_value(self, field_name, text):
        """Clear and type *text* into the named input; no-op when text is None."""
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)

    def delete_contact_by_index(self, index):
        """Delete the contact at *index* in the list view."""
        wd = self.app.wd
        self.open_contact_page()
        self.select_contact_by_index(index)
        # submit deletion
        wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
        # confirm deletion
        wd.switch_to_alert().accept()
        self.contact_cache = None

    def delete_contact_by_id(self, id):
        """Delete the contact whose checkbox value equals *id*."""
        wd = self.app.wd
        self.open_contact_page()
        self.select_contact_by_id(id)
        # submit deletion
        wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
        # confirm deletion
        wd.switch_to_alert().accept()
        self.contact_cache = None

    def delete_first_contact(self):
        """Convenience wrapper: delete the first contact in the list."""
        self.delete_contact_by_index(0)

    def select_contact_by_index(self, index):
        """Tick the selection checkbox of the contact at *index*."""
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()

    def select_contact_by_id(self, id):
        """Tick the selection checkbox for contact *id* and return the element."""
        wd = self.app.wd
        checkbox = wd.find_element_by_css_selector("input[value='%s']" % id)
        checkbox.click()
        return checkbox

    def modify_contact_by_index(self, index, new_contact_data):
        """Open the edit form of the contact at *index* and apply new data."""
        wd = self.app.wd
        self.open_contact_page()
        self.choose_contact_by_index(index)
        self.fill_contact_form(new_contact_data)
        # submit contact edition
        wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
        self.contact_cache = None

    def modify_contact_by_id(self, id, new_contact_data):
        """Open the edit form of contact *id* (via its row's edit link) and apply new data."""
        wd = self.app.wd
        self.open_contact_page()
        # locate the edit link in the same table row as the id checkbox
        checkbox = self.select_contact_by_id(id)
        row = checkbox.find_element_by_xpath("./../..")
        cell = row.find_elements_by_tag_name("td")[7]
        cell.find_element_by_tag_name("a").click()
        self.fill_contact_form(new_contact_data)
        # submit contact edition
        wd.find_element_by_xpath("//div[@id='content']/form[1]/input[22]").click()
        self.contact_cache = None

    def modify_first_contact(self, new_contact_data):
        """Edit the first contact in the list.

        Fixed: previously called modify_contact_by_index(0) without the
        required new_contact_data argument, which raised TypeError.
        """
        self.modify_contact_by_index(0, new_contact_data)

    def choose_contact_by_index(self, index):
        """Click the edit (pencil) icon of the contact at *index*."""
        wd = self.app.wd
        wd.find_elements_by_xpath("//table[@id='maintable']/tbody/tr/td[8]/a/img")[index].click()

    def choose_contact_by_id(self, id):
        """Click the selection checkbox for contact *id*."""
        wd = self.app.wd
        wd.find_element_by_css_selector("input[value='%s']" % id).click()

    def count(self):
        """Return the number of contacts currently listed."""
        wd = self.app.wd
        self.open_contact_page()
        return len(wd.find_elements_by_name("selected[]"))

    # Memoized list of Contact objects parsed from the home page
    contact_cache = None

    def get_contact_list(self):
        """Return (and cache) all contacts parsed from the list page."""
        wd = self.app.wd
        if self.contact_cache is None:
            self.open_contact_page()
            self.contact_cache = []
            for element in wd.find_elements_by_name("entry"):
                id = element.find_element_by_name("selected[]").get_attribute("value")
                cells = element.find_elements_by_tag_name("td")
                lastname = cells[1].text
                firstname = cells[2].text
                all_phones = cells[5].text
                address = cells[3].text
                all_emails = cells[4].text
                self.contact_cache.append(Contact(id=id, firstname=firstname, lastname=lastname,
                                                  all_phones_from_home_page=all_phones, address=address,
                                                  all_emails_from_home_page=all_emails))
        return self.contact_cache

    def open_contact_to_edit_by_index(self, index):
        """Open the edit form for the contact at *index*."""
        wd = self.app.wd
        self.open_contact_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[7]
        cell.find_element_by_tag_name("a").click()

    def get_contact_info_from_edit_page(self, index):
        """Read the full contact details from the edit form of contact *index*."""
        wd = self.app.wd
        self.open_contact_to_edit_by_index(index)
        firstname = wd.find_element_by_name("firstname").get_attribute("value")
        lastname = wd.find_element_by_name("lastname").get_attribute("value")
        id = wd.find_element_by_name("id").get_attribute("value")
        home_phone = wd.find_element_by_name("home").get_attribute("value")
        mobile_phone = wd.find_element_by_name("mobile").get_attribute("value")
        work_phone = wd.find_element_by_name("work").get_attribute("value")
        phone2 = wd.find_element_by_name("phone2").get_attribute("value")
        address = wd.find_element_by_name("address").get_attribute("value")
        email = wd.find_element_by_name("email").get_attribute("value")
        email2 = wd.find_element_by_name("email2").get_attribute("value")
        email3 = wd.find_element_by_name("email3").get_attribute("value")
        return Contact(firstname=firstname, lastname=lastname, id=id,
                       home_phone=home_phone, mobile_phone=mobile_phone,
                       work_phone=work_phone, phone2=phone2, address=address,
                       email=email, email2=email2, email3=email3)

    def open_contact_view_by_index(self, index):
        """Open the read-only detail view of the contact at *index*."""
        wd = self.app.wd
        self.open_contact_page()
        row = wd.find_elements_by_name("entry")[index]
        cell = row.find_elements_by_tag_name("td")[6]
        cell.find_element_by_tag_name("a").click()

    def get_contact_from_view_page(self, index):
        """Parse phone numbers from the detail view ("H:", "M:", "W:", "P:" lines)."""
        wd = self.app.wd
        self.open_contact_view_by_index(index)
        text = wd.find_element_by_id("content").text
        home_phone = re.search("H: (.*)", text).group(1)
        mobile_phone = re.search("M: (.*)", text).group(1)
        work_phone = re.search("W: (.*)", text).group(1)
        phone2 = re.search("P: (.*)", text).group(1)
        return Contact(home_phone=home_phone, mobile_phone=mobile_phone,
                       work_phone=work_phone, phone2=phone2)

    def add_contact_to_group(self, contact, group):
        """Add *contact* to *group* via the list page's group controls."""
        wd = self.app.wd
        self.select_contact_by_id(contact.id)
        wd.find_element_by_name("to_group").find_element_by_css_selector("option[value=\"%s\"]" % group.id).click()
        wd.find_element_by_name("add").click()
        wd.find_element_by_partial_link_text("group page").click()

    def delete_contact_from_group(self, contact, group):
        """Remove *contact* from *group* (while the group filter is active)."""
        wd = self.app.wd
        wd.find_element_by_name("group").find_element_by_css_selector("option[value=\"%s\"]" % group.id).click()
        self.select_contact_by_id(contact.id)
        wd.find_element_by_name("remove").click()
        wd.find_element_by_partial_link_text("group page").click()

    def select_group(self, group):
        """Filter the contact list by *group*."""
        wd = self.app.wd
        wd.find_element_by_name("group").find_element_by_css_selector("option[value=\"%s\"]" % group.id).click()
|
{
"content_hash": "9ec9fc3042d98b9852442785fe2b167d",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 143,
"avg_line_length": 44.867924528301884,
"alnum_prop": 0.6094407064760303,
"repo_name": "kasiazubielik/python_training",
"id": "2a09920aa51371b51e1cbdf9deed5b6fada2a900",
"size": "9512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "46266"
}
],
"symlink_target": ""
}
|
from function_type import BuiltinFunction
class SubMethod(BuiltinFunction):
    """Builtin function type modelling the ``__sub__`` magic method
    (``self - other``)."""
    # NOTE(review): relies on ``SUB_METHOD`` being provided by
    # BuiltinFunction (or a mixin) — confirm against the base class.
    def __init__(self):
        super().__init__(
            self.SUB_METHOD,
            pos_args=["self", "other"]
        )
|
{
"content_hash": "421178f6c8b52d8607d628160fbdb93a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 41,
"avg_line_length": 22.88888888888889,
"alnum_prop": 0.558252427184466,
"repo_name": "PiJoules/python-type-inference",
"id": "f069b2a09518eec0828200d3fd68f92a01023141",
"size": "206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magic_methods/sub_method.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104822"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
class PaymentMethodManager(models.Manager):
    """Manager that yields only enabled payment methods."""
    # NOTE(review): ``get_query_set`` is the pre-Django-1.6 hook name; on
    # Django >= 1.8 this override is ignored in favour of ``get_queryset``.
    def get_query_set(self):
        qs = super(PaymentMethodManager, self).get_query_set()
        return qs.filter(is_enabled=True)
class PaymentMethod(models.Model):
    """A configurable SunTech payment channel.

    ``save`` enforces at most one enabled method per payment type.
    """
    # Payment channel type codes
    TYPE_BUYSAFE = 0
    TYPE_WEBATM = 1
    TYPE_24PAY = 2
    TYPE_PAYCODE = 3
    TYPE_CHOICES = (
        (TYPE_BUYSAFE, _('BuySafe')),
        (TYPE_WEBATM, _('Web ATM')),
        (TYPE_24PAY, _('24Pay')),
        (TYPE_PAYCODE, _('PayCode'))
    )

    name = models.CharField(max_length=100, verbose_name=_('name'))
    payment_type = models.IntegerField(choices=TYPE_CHOICES)
    content = models.TextField(blank=True, verbose_name=_('content'))
    store_id = models.CharField(
        max_length=11, verbose_name=_('SunTech store ID'),
        help_text=_(
            'Should be exactly 10 characters, starting with a capitalized '
            'alphabet.'
        )
    )
    password = models.CharField(
        max_length=20, verbose_name=_('SunTech password'),
        help_text=_(
            'This should usually be the transaction password, which is an '
            'alphanumeric string with length between 8 to 20.'
        )
    )
    is_enabled = models.BooleanField(default=True, verbose_name=_('enabled'))

    # ``enabled`` filters to active rows; ``objects`` is the plain manager
    enabled = PaymentMethodManager()
    objects = models.Manager()

    class Meta:
        verbose_name = _('Payment method')
        verbose_name_plural = _('Payment methods')
        ordering = ['-is_enabled', 'payment_type', '-id']

    def save(self, *args, **kwargs):
        """Persist the row, then disable other enabled rows of the same type."""
        super(PaymentMethod, self).save(*args, **kwargs)
        if self.is_enabled:
            (PaymentMethod.objects.exclude(id=self.id)
                .filter(payment_type=self.payment_type,
                        is_enabled=True)
                .update(is_enabled=False))

    def __unicode__(self):
        return self.name
|
{
"content_hash": "de83141c2af60eb836e7dd46fec84f9b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 33.94827586206897,
"alnum_prop": 0.5931945149822245,
"repo_name": "uranusjr/django-buysafe",
"id": "ffdb88ab79bb51fcf7750da289794eea855e218f",
"size": "1969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "buysafe/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "29591"
}
],
"symlink_target": ""
}
|
import random
from string import ascii_lowercase
from unittest import TestCase
import jwt
from gargantua.utils import render_md_to_html, \
generate_passwd, validate_passwd, generate_token, validate_token
class TestMarkdown(TestCase):

    def test_render_md_to_html(self):
        """Each markdown snippet must render to the exact expected HTML."""
        cases = (
            ('title\n===', '<div class="highlight"><h1>title</h1>\n</div>'),
            ('#1\n##2', '<div class="highlight"><h1>1</h1>\n\n<h2 id="2">2</h2>\n</div>'),
        )
        for markdown, expected in cases:
            self.assertEqual(render_md_to_html(markdown), expected)
class TestEncrypt(TestCase):
def generate_random(self):
return ''.join([random.choice(ascii_lowercase) for _ in range(15)])
def test_bcrypt(self):
passwd = self.generate_random()
wrong = passwd[:-1] + '0'
hashed = generate_passwd(passwd)
self.assertTrue(validate_passwd(passwd, hashed))
self.assertFalse(validate_passwd(wrong, hashed))
def test_jwt(self):
passwd = self.generate_random()
wrong = passwd[:-1] + '0'
j = {'username': 'laisky'}
token = generate_token(j, passwd)
self.assertTrue(validate_token(token, passwd))
self.assertRaises(jwt.DecodeError, validate_token, token, wrong)
|
{
"content_hash": "9060d9bc19281dda0b6c41d504344bfb",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 106,
"avg_line_length": 32.292682926829265,
"alnum_prop": 0.6193353474320241,
"repo_name": "Laisky/laisky-blog",
"id": "1fb542ec933f01040968c7b930f524d0ad7af468",
"size": "1324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "574"
},
{
"name": "HTML",
"bytes": "31483"
},
{
"name": "JavaScript",
"bytes": "93788"
},
{
"name": "Jupyter Notebook",
"bytes": "26355"
},
{
"name": "Makefile",
"bytes": "510"
},
{
"name": "Python",
"bytes": "79973"
},
{
"name": "SCSS",
"bytes": "28238"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from entity import Entity
import project
class Task(Entity):
    """Asana task entity.

    Adds helpers for project membership and for moving a task into a
    "section" — a task whose name ends with ':'.
    """
    _matchon = 'task'

    _filter_keys = [
        'project', 'assignee', 'workspace', 'completed_since', 'modified_since'
    ]

    _fields = [
        'assignee','created_by','created_at','completed','completed_at',
        'followers','modified_at','name','notes','projects','parent',
        'workspace'
    ]

    _children = {
        'tags': None
    }

    # When True, section tasks are dropped from filtered API results
    filter_sections = True

    @classmethod
    def _filter_result_item(cls, entity, query):
        """Drop sections from results when filter_sections is enabled."""
        if cls.filter_sections and cls._is_section(entity):
            return False
        return super(Task, cls)._filter_result_item(entity, query)

    @classmethod
    def _is_section(cls, ent):
        """Checks whether a dict from the API is a section Task

        :param ent: The dict to check
        """
        return ent['name'] and cls._is_section_name(ent['name'])

    @staticmethod
    def _is_section_name(name):
        """A section is marked by a trailing colon in the task name."""
        return name and name[-1] == ':'

    def is_section(self):
        """Return truthy when this task is a section header."""
        return self._is_section_name(self.name)

    def add_project(self, projectOrId):
        """Adds this task to a project

        :param projectOrId Either the project object or a project ID
        """
        return self._edit_project('addProject', projectOrId)

    def remove_project(self, projectOrId):
        """Removes this task from a project

        :param projectOrId Either the project object or a project ID
        """
        return self._edit_project('removeProject', projectOrId)

    def _edit_project(self, operation, projectOrId, data=None):
        """POST a project add/remove/move operation for this task.

        :param operation: API operation name, e.g. 'addProject'
        :param projectOrId: Either the project object or a project ID
        :param data: Optional extra POST fields (copied, never mutated)

        Bug fix: ``data`` previously defaulted to a shared mutable ``{}``
        which this method mutated, so keys such as 'insert_after' from one
        call could leak into later unrelated calls (and callers' dicts were
        mutated in place).
        """
        data = dict(data) if data else {}
        pId = projectOrId.id if isinstance(projectOrId, project.Project) else projectOrId
        data['project'] = pId
        return self._get_api().post(
            '/'.join([self._get_item_url(), operation]),
            data=data
        )

    def add_to_section(self, section):
        """Moves this task to a section

        If this task and the section share one or more projects the first one
        found is used. If they don't the first project in the section is used

        :param section The section to move to
        """
        sharedProjects = set(self.projects) & set(section.projects)

        if len(sharedProjects):
            pId = sharedProjects.pop()
        else:
            pId = section.projects[0]

        return self._edit_project(
            'addProject',
            pId,
            {'insert_after': section.id}
        )
|
{
"content_hash": "f6f175915a83c1ce833dad34cd99ddcb",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 83,
"avg_line_length": 23.786516853932586,
"alnum_prop": 0.6868209730751063,
"repo_name": "mburgs/asanorm",
"id": "b2b10b2be03f67f8b37ae5199196d999964d2727",
"size": "2142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asana/entities/task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25818"
}
],
"symlink_target": ""
}
|
import functools
import getpass
import logging
import time
import uuid
import novaclient.exceptions
import novaclient.client
from cloudenvy import exceptions
def not_found(func):
    """Decorator: translate novaclient NotFound errors into a ``None`` return."""
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except novaclient.exceptions.NotFound:
            return None
    return wrapped
def bad_request(func):
    """decorator to wrap novaclient functions that may return a
    400 'BadRequest' exception when the endpoint is unavailable or
    unable to be resolved.

    Logs the failure and terminates the process, since a retry with the
    same endpoints cannot succeed.
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except novaclient.exceptions.BadRequest as xcpt:
            # lazy %-style args instead of eager string concatenation
            logging.error("Unable to communicate with endpoints: "
                          "Received 400/Bad Request from OpenStack: %s",
                          xcpt)
            # The bare exit() builtin is provided by the site module and is
            # meant for interactive use; raise SystemExit explicitly.
            raise SystemExit()
    return wrapped
def retry_on_overlimit(func):
    """Decorator: retry a novaclient call exactly once after an OverLimit.

    Sleeps for the exception's ``retry_after`` hint before retrying; if the
    hint is absent/zero, or the retry is also limited, the process exits.
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except novaclient.exceptions.OverLimit as exc:
            # 0 / missing retry_after means no backoff will help — give up
            retry_time = getattr(exc, 'retry_after', 0)
            if not retry_time:
                logging.fatal('Unable to allocate resource: %s' % exc.message)
                raise SystemExit()

            logging.debug('Request was limited, retrying in %s seconds: %s'
                          % (retry_time, exc.message))
            time.sleep(retry_time)

            # second (and final) attempt after backing off
            try:
                return func(*args, **kwargs)
            except novaclient.exceptions.OverLimit as exc:
                logging.fatal('Unable to allocate resource: %s' % exc.message)
                raise SystemExit()
    return wrapped
class CloudAPI(object):
    """Thin wrapper over the OpenStack (nova) API used by cloudenvy.

    Wraps novaclient calls with the not_found/bad_request/
    retry_on_overlimit decorators defined above and reads its
    credentials from the user config's ``cloud`` section.
    """
    def __init__(self, config):
        # Lazily-created novaclient handle; see the `client` property.
        self._client = None
        self.config = config
        #NOTE(bcwaldon): This was just dumped here to make room for EC2.
        # Clean it up!
        # Fail fast if any mandatory credential key is missing.
        for item in ['os_username', 'os_tenant_name', 'os_auth_url']:
            try:
                config.user_config['cloud'][item]
            except KeyError:
                raise SystemExit("Ensure '%s' is set in user config" % item)
        # Password is optional in the config; prompt interactively if absent
        # and cache the answer back into the config dict.
        try:
            password = config.user_config['cloud']['os_password']
        except KeyError:
            username = config.user_config['cloud']['os_username']
            prompt = "Password for account '%s': " % username
            password = getpass.getpass(prompt)
            config.user_config['cloud']['os_password'] = password
        # OpenStack Auth Items
        self.user = self.config.user_config['cloud'].get('os_username', None)
        self.password = self.config.user_config['cloud'].get('os_password', None)
        self.tenant_name = self.config.user_config['cloud'].get('os_tenant_name',
                                                                None)
        self.auth_url = self.config.user_config['cloud'].get('os_auth_url', None)
        self.region_name = self.config.user_config['cloud'].get('os_region_name',
                                                                None)
    @property
    def client(self):
        """Lazily construct and cache the novaclient v2 client."""
        if not self._client:
            self._client = novaclient.client.Client(
                '2',
                self.user,
                self.password,
                self.tenant_name,
                self.auth_url,
                no_cache=True,
                region_name=self.region_name)
        return self._client
    def is_server_active(self, server_id):
        """Return True if the server's status is ACTIVE."""
        server = self.get_server(server_id)
        return server.status == 'ACTIVE'
    def is_network_active(self, server_id):
        """Return True once the server has at least one network attached."""
        server = self.get_server(server_id)
        return len(server.networks) > 0
    @bad_request
    def list_servers(self):
        """List all servers visible to this tenant."""
        return self.client.servers.list()
    @bad_request
    @not_found
    def find_server(self, name):
        """Find a server by name, or None if it does not exist."""
        return self.client.servers.find(name=name)
    @bad_request
    @not_found
    def get_server(self, server_id):
        """Get a server by id, or None if it does not exist."""
        return self.client.servers.get(server_id)
    @retry_on_overlimit
    @bad_request
    def create_server(self, *args, **kwargs):
        """Create a server, tagging it with the auth URL in its metadata."""
        kwargs.setdefault('meta', {})
        #TODO(gabrielhurley): Allow user-defined server metadata, see
        #https://github.com/cloudenvy/cloudenvy/issues/125 for more info.
        kwargs['meta']['os_auth_url'] = self.auth_url
        return self.client.servers.create(*args, **kwargs)
    def setup_network(self, server_id):
        """Assign a floating IP to the server, allocating one if needed."""
        server = self.get_server(server_id)
        try:
            floating_ip = self._find_free_ip()
        except exceptions.NoIPsAvailable:
            logging.info('Allocating a new floating ip to project.')
            self._allocate_floating_ip()
            floating_ip = self._find_free_ip()
        logging.info('Assigning floating ip %s to server.', floating_ip)
        self._assign_ip(server, floating_ip)
    @bad_request
    def _find_free_ip(self):
        """Return the first unassigned floating IP, or raise NoIPsAvailable."""
        fips = self.client.floating_ips.list()
        for fip in fips:
            if not fip.instance_id:
                return fip.ip
        raise exceptions.NoIPsAvailable()
    @bad_request
    def find_ip(self, server_id):
        """Return the floating IP assigned to the server, or None."""
        fips = self.client.floating_ips.list()
        for fip in fips:
            if fip.instance_id == server_id:
                return fip.ip
    @retry_on_overlimit
    @bad_request
    def _assign_ip(self, server, ip):
        """Attach the given floating IP to the server."""
        server.add_floating_ip(ip)
    @bad_request
    @not_found
    def find_image(self, search_str):
        """Find an image by name, falling back to lookup by UUID string."""
        try:
            return self.client.images.find(name=search_str)
        except novaclient.exceptions.NotFound:
            pass
        try:
            #NOTE(bcwaldon): We can't guarantee all images use UUID4 for their
            # image ID format, but this is the only way to get around issue
            # 69 (https://github.com/cloudenvy/cloudenvy/issues/69) for now.
            # Novaclient should really block us from requesting an image by
            # ID that's actually a human-readable name (with spaces in it).
            uuid.UUID(search_str)
            return self.client.images.get(search_str)
        except (ValueError, novaclient.exceptions.NotFound):
            raise SystemExit('Image `%s` could not be found.' % search_str)
    @retry_on_overlimit
    @bad_request
    def snapshot(self, server, name):
        """Create a snapshot image of the server under the given name."""
        return self.client.servers.create_image(server, name)
    @bad_request
    @not_found
    def find_flavor(self, name):
        """Find a flavor by name, or None if it does not exist."""
        return self.client.flavors.find(name=name)
    @bad_request
    @not_found
    def find_security_group(self, name):
        """Find a security group by name, or None if it does not exist."""
        return self.client.security_groups.find(name=name)
    @retry_on_overlimit
    @bad_request
    @not_found
    def create_security_group(self, name):
        """Create a security group whose description matches its name."""
        return self.client.security_groups.create(name, name)
    @retry_on_overlimit
    def create_security_group_rule(self, security_group, rule):
        """Create a rule in the group; a 400 means it already exists."""
        try:
            return self.client.security_group_rules.create(
                security_group.id, *rule)
        except novaclient.exceptions.BadRequest:
            # Nova reports a duplicate rule as BadRequest; treat as benign.
            logging.info('Security Group Rule "%s" already exists.' %
                         str(rule))
    @retry_on_overlimit
    @bad_request
    def _allocate_floating_ip(self):
        """Allocate a new floating IP to the project."""
        return self.client.floating_ips.create()
    @bad_request
    @not_found
    def find_keypair(self, name):
        """Find a keypair by name, or None if it does not exist."""
        return self.client.keypairs.find(name=name)
    @retry_on_overlimit
    @bad_request
    def create_keypair(self, name, key_data):
        """Upload a public key as a named keypair."""
        return self.client.keypairs.create(name, public_key=key_data)
    def delete_server(self, server):
        """Delete the given server."""
        server.delete()
|
{
"content_hash": "b76d857b57f36df33dd352c262924f01",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 81,
"avg_line_length": 32.675105485232066,
"alnum_prop": 0.5910382231404959,
"repo_name": "cloudenvy/cloudenvy",
"id": "ce04498fa1c5c1f1471b9e69c3b5890580ad2262",
"size": "7788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudenvy/clouds/openstack.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "51038"
}
],
"symlink_target": ""
}
|
"""
Sphinx plugins for web API docs.
"""
import inspect
import logging
import re
import sys
try:
import json
except ImportError:
import simplejson as json
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.template.defaultfilters import title
from djblets.util.http import is_mimetype_a
from djblets.webapi.resources import get_resource_from_class, WebAPIResource
from djblets.webapi.responses import WebAPIResponseError
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from reviewboard import initialize
from reviewboard.webapi.resources import resources
from sphinx import addnodes
from sphinx.util import docname_join
from sphinx.util.compat import Directive
# Mapping of mimetypes to language names for syntax highlighting.
MIMETYPE_LANGUAGES = [
    ('application/json', 'javascript'),
    ('application/xml', 'xml'),
    ('text/x-patch', 'diff'),
]
# Initialize Review Board
initialize()
# Build the list of parents.
# Walking the URL patterns has the side effect of populating each
# resource's _parent_resource links, which the directives below rely on.
resources.root.get_url_patterns()
class ResourceNotFound(Exception):
    """Raised when a webapi-resource directive names an unimportable class.

    Carries a pre-built docutils error node so the directive can return
    it directly as its output.
    """
    def __init__(self, directive, classname):
        self.classname = classname
        reporter = directive.state_machine.reporter
        self.error_node = [reporter.error(str(self), line=directive.lineno)]
    def __str__(self):
        return ('Unable to import the web API resource class "%s"'
                % self.classname)
class ErrorNotFound(Exception):
    """Raised when a webapi-error directive names an unimportable error class.

    Carries a pre-built docutils error node for the directive to return.
    """
    def __init__(self, directive, classname):
        reporter = directive.state_machine.reporter
        message = 'Unable to import the web API error class "%s"' % classname
        self.error_node = [reporter.error(message, line=directive.lineno)]
class DummyRequest(HttpRequest):
    """Minimal stand-in HTTP request used to render example API payloads.

    Pretends to be a logged-in GET request from the first user in the
    database, without going through the normal request/response cycle.
    """
    def __init__(self, *args, **kwargs):
        super(DummyRequest, self).__init__(*args, **kwargs)
        self.method = 'GET'
        self.path = ''
        self.user = User.objects.all()[0]
        self.session = {}
        # Normally set internally by Djblets, but we bypass the standard
        # __call__ flow, so it has to be provided here.
        self._djblets_webapi_object_cache = {}
    def build_absolute_uri(self, location=None):
        """Return an absolute example URI for *location* (or this path)."""
        target = location or self.path
        if not target:
            return '/api/'
        if not target.startswith('http://'):
            target = 'http://reviews.example.com' + target
        return target
class ResourceDirective(Directive):
    """Sphinx directive rendering full documentation for one API resource.

    Emits a titled section containing a details table, field and link
    tables, per-HTTP-method sections, and live example payloads fetched
    from the running Review Board instance.
    """
    has_content = True
    required_arguments = 0
    option_spec = {
        'classname': directives.unchanged_required,
        'is-list': directives.flag,
        'hide-links': directives.flag,
        'hide-examples': directives.flag,
    }
    # Methods documented for item resources vs. list resources.
    item_http_methods = set(['GET', 'DELETE', 'PUT'])
    list_http_methods = set(['GET', 'POST'])
    # Generic mimetypes suppressed from the examples section.
    FILTERED_MIMETYPES = [
        'application/json',
        'application/xml',
    ]
    # Maps Python field types to the human-readable names shown in tables.
    type_mapping = {
        int: 'Integer',
        str: 'String',
        unicode: 'String',
        bool: 'Boolean',
        dict: 'Dictionary',
        file: 'Uploaded File',
    }
    def run(self):
        """Build and return the document nodes for the resource."""
        try:
            resource_class = self.get_resource_class(self.options['classname'])
        except ResourceNotFound, e:
            return e.error_node
        # Add the class's file and this extension to the dependencies.
        self.state.document.settings.env.note_dependency(__file__)
        self.state.document.settings.env.note_dependency(
            sys.modules[resource_class.__module__].__file__)
        resource = get_resource_from_class(resource_class)
        is_list = 'is-list' in self.options
        docname = 'webapi2.0-%s-resource' % \
            get_resource_docname(resource, is_list)
        resource_title = get_resource_title(resource, is_list)
        targetnode = nodes.target('', '', ids=[docname], names=[docname])
        self.state.document.note_explicit_target(targetnode)
        main_section = nodes.section(ids=[docname])
        # Main section
        main_section += nodes.title(text=resource_title)
        main_section += parse_text(
            self, inspect.getdoc(resource),
            where='%s class docstring' % self.options['classname'])
        # Details section
        details_section = nodes.section(ids=['details'])
        main_section += details_section
        details_section += nodes.title(text='Details')
        details_section += self.build_details_table(resource)
        # Fields section
        if (resource.fields and
            (not is_list or resource.singleton)):
            fields_section = nodes.section(ids=['fields'])
            main_section += fields_section
            fields_section += nodes.title(text='Fields')
            fields_section += self.build_fields_table(resource.fields)
        # Links section
        if 'hide-links' not in self.options:
            fields_section = nodes.section(ids=['links'])
            main_section += fields_section
            fields_section += nodes.title(text='Links')
            fields_section += self.build_links_table(resource)
        # HTTP method descriptions
        for http_method in self.get_http_methods(resource, is_list):
            method_section = nodes.section(ids=[http_method])
            main_section += method_section
            method_section += nodes.title(text='HTTP %s' % http_method)
            method_section += self.build_http_method_section(resource,
                                                            http_method)
        if 'hide-examples' not in self.options:
            examples_section = nodes.section(ids=['examples'])
            examples_section += nodes.title(text='Examples')
            has_examples = False
            if is_list:
                mimetype_key = 'list'
            else:
                mimetype_key = 'item'
            for mimetype in resource.allowed_mimetypes:
                try:
                    mimetype = mimetype[mimetype_key]
                except KeyError:
                    continue
                if mimetype in self.FILTERED_MIMETYPES:
                    # Resources have more specific mimetypes. We want to
                    # filter out the general ones (like application/json)
                    # so we don't show redundant examples.
                    continue
                if mimetype.endswith('xml'):
                    # JSON is preferred. While we support XML, let's not
                    # continue to advertise it.
                    continue
                url, headers, data = \
                    self.fetch_resource_data(resource, mimetype)
                example_node = build_example(headers, data, mimetype)
                if example_node:
                    example_section = \
                        nodes.section(ids=['example_' + mimetype],
                                      classes=['examples', 'requests-example'])
                    examples_section += example_section
                    example_section += nodes.title(text=mimetype)
                    accept_mimetype = mimetype
                    if (mimetype.startswith('application/') and
                        mimetype.endswith('+json')):
                        # Instead of telling the user to ask for a specific
                        # mimetype on the request, show them that asking for
                        # application/json works fine.
                        accept_mimetype = 'application/json'
                    curl_text = ('$ curl http://reviews.example.com%s -A %s'
                                 % (url, accept_mimetype))
                    example_section += nodes.literal_block(
                        curl_text, curl_text, classes=['cmdline'])
                    example_section += nodes.literal_block(
                        headers, headers, classes=['http-headers'])
                    example_section += example_node
                    has_examples = True
            if has_examples:
                main_section += examples_section
        return [targetnode, main_section]
    def build_details_table(self, resource):
        """Build the two-column summary table (name, URI, methods, ...)."""
        is_list = 'is-list' in self.options
        table = nodes.table(classes=['resource-info'])
        tgroup = nodes.tgroup(cols=2)
        table += tgroup
        tgroup += nodes.colspec(colwidth=30, classes=['field'])
        tgroup += nodes.colspec(colwidth=70, classes=['value'])
        tbody = nodes.tbody()
        tgroup += tbody
        # Name
        if is_list:
            resource_name = resource.name_plural
        else:
            resource_name = resource.name
        append_detail_row(tbody, "Name", nodes.literal(text=resource_name))
        # URI
        uri_template = get_resource_uri_template(resource, not is_list)
        append_detail_row(tbody, "URI", nodes.literal(text=uri_template))
        # Token Policy ID
        if hasattr(resource, 'policy_id'):
            append_detail_row(tbody, "Token Policy ID",
                              nodes.literal(text=resource.policy_id))
        # HTTP Methods
        allowed_http_methods = self.get_http_methods(resource, is_list)
        bullet_list = nodes.bullet_list()
        for http_method in allowed_http_methods:
            item = nodes.list_item()
            bullet_list += item
            paragraph = nodes.paragraph()
            item += paragraph
            ref = nodes.reference(text=http_method, refid=http_method)
            paragraph += ref
            doc_summary = self.get_doc_for_http_method(resource, http_method)
            # Truncate the handler docstring to its first sentence.
            i = doc_summary.find('.')
            if i != -1:
                doc_summary = doc_summary[:i + 1]
            paragraph += nodes.inline(text=" - ")
            paragraph += parse_text(
                self, doc_summary, nodes.inline,
                where='HTTP %s handler summary for %s'
                      % (http_method, self.options['classname']))
        append_detail_row(tbody, "HTTP Methods", bullet_list)
        # Parent Resource
        if is_list or resource.uri_object_key is None:
            parent_resource = resource._parent_resource
            is_parent_list = False
        else:
            parent_resource = resource
            is_parent_list = True
        if parent_resource:
            paragraph = nodes.paragraph()
            paragraph += get_ref_to_resource(parent_resource, is_parent_list)
        else:
            paragraph = 'None.'
        append_detail_row(tbody, "Parent Resource", paragraph)
        # Child Resources
        if is_list:
            child_resources = list(resource.list_child_resources)
            if resource.name != resource.name_plural:
                if resource.uri_object_key:
                    child_resources.append(resource)
                are_children_lists = False
            else:
                are_children_lists = True
        else:
            child_resources = resource.item_child_resources
            are_children_lists = True
        if child_resources:
            tocnode = addnodes.toctree()
            tocnode['glob'] = None
            tocnode['maxdepth'] = 1
            tocnode['hidden'] = False
            docnames = sorted([
                docname_join(self.state.document.settings.env.docname,
                             get_resource_docname(child_resource,
                                                  are_children_lists))
                for child_resource in child_resources
            ])
            tocnode['includefiles'] = docnames
            tocnode['entries'] = [(None, docname) for docname in docnames]
        else:
            tocnode = nodes.paragraph(text="None")
        append_detail_row(tbody, "Child Resources", tocnode)
        # Anonymous Access
        if is_list and not resource.singleton:
            getter = resource.get_list
        else:
            getter = resource.get
        if getattr(getter, 'login_required', False):
            anonymous_access = 'No'
        elif getattr(getter, 'checks_login_required', False):
            anonymous_access = 'Yes, if anonymous site access is enabled'
        else:
            anonymous_access = 'Yes'
        append_detail_row(tbody, "Anonymous Access", anonymous_access)
        return table
    def build_fields_table(self, fields, required_fields={},
                           show_requirement_labels=False):
        """Build a three-column Field/Type/Description table.

        NOTE(review): required_fields uses a mutable default argument;
        it is only read here, so this is safe, but worth confirming no
        caller mutates it.
        """
        def get_type_name(field_type):
            # We may be dealing with a forward-declared class.
            if isinstance(field_type, basestring) and field_type is not str:
                field_type = self.get_resource_class(field_type)
            if type(field_type) is list:
                return [nodes.inline(text='List of ')] + \
                       get_type_name(field_type[0])
            elif type(field_type) is tuple:
                value_nodes = []
                for value in field_type:
                    if value_nodes:
                        value_nodes.append(nodes.inline(text=', '))
                    value_nodes.append(nodes.literal(text=value))
                return [nodes.inline(text='One of ')] + value_nodes
            elif (inspect.isclass(field_type) and
                  issubclass(field_type, WebAPIResource)):
                return [get_ref_to_resource(field_type, False)]
            elif field_type in self.type_mapping:
                return [nodes.inline(text=self.type_mapping[field_type])]
            else:
                print "Unknown type %s" % (field_type,)
                assert False
        table = nodes.table(classes=['resource-fields'])
        tgroup = nodes.tgroup(cols=3)
        table += tgroup
        tgroup += nodes.colspec(colwidth=25, classes=['field'])
        tgroup += nodes.colspec(colwidth=15, classes=['type'])
        tgroup += nodes.colspec(colwidth=60, classes=['description'])
        thead = nodes.thead()
        tgroup += thead
        append_row(thead, ['Field', 'Type', 'Description'])
        tbody = nodes.tbody()
        tgroup += tbody
        if isinstance(fields, dict):
            for field in sorted(fields.iterkeys()):
                info = fields[field]
                name_node = nodes.inline()
                name_node += nodes.strong(text=field)
                if show_requirement_labels:
                    if field in required_fields:
                        name_node += nodes.inline(text=" (required)")
                    else:
                        name_node += nodes.inline(text=" (optional)")
                type_node = nodes.inline()
                type_node += get_type_name(info['type'])
                append_row(tbody,
                           [name_node,
                            type_node,
                            parse_text(self, info['description'],
                                       where='%s field description' % field)])
        else:
            # Plain sequence of field names: no type/description available.
            for field in sorted(fields):
                name = field
                if show_requirement_labels:
                    if field in required_fields:
                        name += " (required)"
                    else:
                        name += " (optional)"
                append_row(tbody, [name, "", ""])
        return table
    def build_links_table(self, resource):
        """Build the Name/Method/Resource table of the resource's links."""
        is_list = 'is-list' in self.options
        table = nodes.table()
        tgroup = nodes.tgroup(cols=3)
        table += tgroup
        tgroup += nodes.colspec(colwidth=25)
        tgroup += nodes.colspec(colwidth=15)
        tgroup += nodes.colspec(colwidth=60)
        thead = nodes.thead()
        tgroup += thead
        append_row(thead, ['Name', 'Method', 'Resource'])
        tbody = nodes.tbody()
        tgroup += tbody
        request = DummyRequest()
        if is_list:
            child_resources = resource.list_child_resources
        else:
            child_resources = resource.item_child_resources
        names_to_resource = {}
        for child in child_resources:
            names_to_resource[child.name_plural] = (child, True)
        if not is_list and resource.model:
            # Item resources need a concrete object from the database to
            # resolve their related links.
            child_keys = {}
            create_fake_resource_path(request, resource, child_keys, True)
            obj = resource.get_queryset(request, **child_keys)[0]
        else:
            obj = None
        related_links = resource.get_related_links(request=request, obj=obj)
        for key, info in related_links.iteritems():
            names_to_resource[key] = \
                (info['resource'], info.get('list-resource', False))
        # NOTE(review): this creates a second DummyRequest rather than
        # reusing `request` above — presumably intentional, but confirm.
        links = resource.get_links(child_resources, request=DummyRequest(),
                                   obj=obj)
        for linkname in sorted(links.iterkeys()):
            info = links[linkname]
            child, is_child_link = \
                names_to_resource.get(linkname, (resource, is_list))
            paragraph = nodes.paragraph()
            paragraph += get_ref_to_resource(child, is_child_link)
            append_row(tbody,
                       [nodes.strong(text=linkname),
                        info['method'],
                        paragraph])
        return table
    def build_http_method_section(self, resource, http_method):
        """Build the description/parameters/errors nodes for one method."""
        doc = self.get_doc_for_http_method(resource, http_method)
        http_method_func = self.get_http_method_func(resource, http_method)
        # Description text
        returned_nodes = [
            parse_text(self, doc, where='HTTP %s doc' % http_method)
        ]
        # Request Parameters section
        required_fields = getattr(http_method_func, 'required_fields', [])
        optional_fields = getattr(http_method_func, 'optional_fields', [])
        if required_fields or optional_fields:
            all_fields = dict(required_fields)
            all_fields.update(optional_fields)
            fields_section = nodes.section(ids=['%s_params' % http_method])
            returned_nodes.append(fields_section)
            fields_section += nodes.title(text='Request Parameters')
            table = self.build_fields_table(all_fields,
                                            required_fields=required_fields,
                                            show_requirement_labels=True)
            fields_section += table
        # Errors section
        errors = getattr(http_method_func, 'response_errors', [])
        if errors:
            errors_section = nodes.section(ids=['%s_errors' % http_method])
            returned_nodes.append(errors_section)
            errors_section += nodes.title(text='Errors')
            bullet_list = nodes.bullet_list()
            errors_section += bullet_list
            for error in sorted(errors, key=lambda x: x.code):
                item = nodes.list_item()
                bullet_list += item
                paragraph = nodes.paragraph()
                item += paragraph
                paragraph += get_ref_to_error(error)
        return returned_nodes
    def fetch_resource_data(self, resource, mimetype):
        """Perform a fake GET on the resource; return (url, headers, body)."""
        kwargs = {}
        request = DummyRequest()
        request.path = create_fake_resource_path(request, resource, kwargs,
                                                 'is-list' not in self.options)
        headers, data = fetch_response_data(resource, mimetype, request,
                                            **kwargs)
        return request.path, headers, data
    def get_resource_class(self, classname):
        """Import the resource class, raising ResourceNotFound on failure."""
        try:
            return get_from_module(classname)
        except ImportError:
            raise ResourceNotFound(self, classname)
    def get_http_method_func(self, resource, http_method):
        """Return the handler function for the given HTTP method."""
        if (http_method == 'GET' and 'is-list' in self.options and
            not resource.singleton):
            method_name = 'get_list'
        else:
            method_name = resource.method_mapping[http_method]
            # Change "put" and "post" to "update" and "create", respectively.
            # "put" and "post" are just wrappers and we don't want to show
            # their documentation.
            if method_name == 'put':
                method_name = 'update'
            elif method_name == 'post':
                method_name = 'create'
        return getattr(resource, method_name)
    def get_doc_for_http_method(self, resource, http_method):
        """Return the handler's docstring, or '' if it has none."""
        return inspect.getdoc(self.get_http_method_func(resource,
                                                        http_method)) or ''
    def get_http_methods(self, resource, is_list):
        """Return the sorted methods allowed for this resource kind."""
        if is_list:
            possible_http_methods = self.list_http_methods
        else:
            possible_http_methods = self.item_http_methods
        return sorted(
            set(resource.allowed_methods).intersection(possible_http_methods))
class ResourceTreeDirective(Directive):
    """Render the entire API resource tree as nested bullet lists."""
    has_content = True
    def run(self):
        tree = nodes.bullet_list()
        self._output_resource(resources.root, tree, True)
        return [tree]
    def _output_resource(self, resource, parent, is_list):
        """Append one entry for *resource* and recurse into its children."""
        entry = nodes.list_item()
        parent += entry
        label = nodes.paragraph()
        entry += label
        ref_text = ':ref:`%s <%s>`' % (
            get_resource_title(resource, is_list, False),
            'webapi2.0-%s-resource' % get_resource_docname(resource, is_list))
        label += parse_text(self, ref_text)
        children = nodes.bullet_list()
        entry += children
        if not is_list:
            for child in resource.item_child_resources:
                self._output_resource(child, children, True)
            return
        # A list entry shows its item form first, then any child lists.
        if resource.uri_object_key:
            self._output_resource(resource, children, False)
        for child in resource.list_child_resources:
            self._output_resource(child, children, True)
class ErrorDirective(Directive):
has_content = True
final_argument_whitespace = True
option_spec = {
'instance': directives.unchanged_required,
'example-data': directives.unchanged,
'title': directives.unchanged,
}
MIMETYPES = [
'application/json',
'application/xml',
]
def run(self):
try:
error_obj = self.get_error_object(self.options['instance'])
except ErrorNotFound, e:
return e.error_node
# Add the class's file and this extension to the dependencies.
self.state.document.settings.env.note_dependency(__file__)
self.state.document.settings.env.note_dependency(
sys.modules[error_obj.__module__].__file__)
docname = 'webapi2.0-error-%s' % error_obj.code
error_title = self.get_error_title(error_obj)
targetnode = nodes.target('', '', ids=[docname], names=[docname])
self.state.document.note_explicit_target(targetnode)
main_section = nodes.section(ids=[docname])
# Details section
main_section += nodes.title(text=error_title)
main_section += self.build_details_table(error_obj)
# Example section
examples_section = nodes.section(ids=['examples'])
examples_section += nodes.title(text='Examples')
extra_params = {}
if 'example-data' in self.options:
extra_params = json.loads(self.options['example-data'])
has_examples = False
for mimetype in self.MIMETYPES:
headers, data = \
fetch_response_data(WebAPIResponseError, mimetype,
err=error_obj,
extra_params=extra_params)
example_node = build_example(headers, data, mimetype)
if example_node:
example_section = nodes.section(ids=['example_' + mimetype])
examples_section += example_section
example_section += nodes.title(text=mimetype)
example_section += example_node
has_examples = True
if has_examples:
main_section += examples_section
return [targetnode, main_section]
def build_details_table(self, error_obj):
table = nodes.table()
tgroup = nodes.tgroup(cols=2)
table += tgroup
tgroup += nodes.colspec(colwidth=20)
tgroup += nodes.colspec(colwidth=80)
tbody = nodes.tbody()
tgroup += tbody
# API Error Code
append_detail_row(tbody, 'API Error Code',
nodes.literal(text=error_obj.code))
# HTTP Status Code
ref = parse_text(self, ':http:`%s`' % error_obj.http_status)
append_detail_row(tbody, 'HTTP Status Code', ref)
# Error Text
append_detail_row(tbody, 'Error Text',
nodes.literal(text=error_obj.msg))
if error_obj.headers:
if callable(error_obj.headers):
headers = error_obj.headers(DummyRequest())
# HTTP Headers
if len(headers) == 1:
content = nodes.literal(text=headers.keys()[0])
else:
content = nodes.bullet_list()
for header in headers.iterkeys():
item = nodes.list_item()
content += item
literal = nodes.literal(text=header)
item += literal
append_detail_row(tbody, 'HTTP Headers', content)
# Description
append_detail_row(
tbody, 'Description',
parse_text(self, '\n'.join(self.content),
where='API error %s description' % error_obj.code))
return table
def get_error_title(self, error_obj):
if 'title' in self.options:
error_title = self.options['title']
else:
name = self.options['instance'].split('.')[-1]
error_title = name.replace('_', ' ').title()
return '%s - %s' % (error_obj.code, error_title)
def get_error_object(self, name):
try:
return get_from_module(name)
except ImportError:
raise ErrorNotFound(self, name)
def parse_text(directive, text, node_type=nodes.paragraph,
               where=None):
    """Parse ReST-formatted *text* and return a *node_type* node with the
    rendered content.

    *where* is only used to make the assertion message useful when a
    caller passes no text at all.
    """
    assert text is not None, 'Missing text during parse_text in %s' % where
    source = ViewList()
    for raw_line in text.split('\n'):
        source.append(raw_line, raw_line)
    result = node_type(rawsource=text)
    directive.state.nested_parse(source, 0, result)
    return result
def get_from_module(name):
    """Import and return the attribute named by dotted path *name*.

    For example, ``get_from_module('string.digits')`` returns the
    ``digits`` attribute of the ``string`` module.

    Raises ImportError (with the failing name, unlike the previous bare
    ``raise ImportError``) if the path has no module part, the module
    cannot be imported, or the attribute does not exist.
    """
    module_path, _, attr = name.rpartition('.')
    if not module_path:
        # A dotless name used to silently mangle the path (name[:-1]);
        # reject it explicitly instead.
        raise ImportError('Unable to import "%s"' % name)
    try:
        mod = __import__(module_path, {}, {}, [attr])
        return getattr(mod, attr)
    except (ImportError, AttributeError):
        raise ImportError('Unable to import "%s"' % name)
def append_row(tbody, cells):
    """Append one table row to *tbody*, one entry per item in *cells*.

    String cells are wrapped in paragraph nodes; anything else is
    assumed to already be a docutils node and is inserted as-is.
    """
    row = nodes.row()
    tbody += row
    for cell in cells:
        entry = nodes.entry()
        row += entry
        entry += (nodes.paragraph(text=cell)
                  if isinstance(cell, basestring) else cell)
def append_detail_row(tbody, header_text, detail):
    """Append a bold-label/value row to a details table.

    String details are split on blank lines into paragraph nodes; node
    details are used unchanged.
    """
    label = nodes.strong(text=header_text)
    if isinstance(detail, basestring):
        detail = [nodes.paragraph(text=chunk)
                  for chunk in detail.split('\n\n')]
    append_row(tbody, [label, detail])
# Precompiled patterns for CamelCase word-boundary detection.
FIRST_CAP_RE = re.compile(r'(.)([A-Z][a-z]+)')
ALL_CAP_RE = re.compile(r'([a-z0-9])([A-Z])')
def uncamelcase(name, separator='_'):
    """Convert a CamelCase string into lowercase words joined by
    *separator* (e.g. ``'HTTPResponse'`` -> ``'http_response'``).
    """
    replacement = r'\1%s\2' % separator
    partially_split = FIRST_CAP_RE.sub(replacement, name)
    return ALL_CAP_RE.sub(replacement, partially_split).lower()
def get_resource_title(resource, is_list, append_resource=True):
    """Return a human-readable name for *resource*.

    Uses the resource's ``verbose_name`` when present; otherwise derives
    one from the class name. List resources get a " List" suffix, and
    " Resource" is appended unless *append_resource* is False.
    """
    if hasattr(resource, 'verbose_name'):
        display_name = resource.verbose_name
    else:
        stripped = resource.__class__.__name__.replace('Resource', '')
        display_name = title(uncamelcase(stripped, ' '))
    result = '%s List' % display_name if is_list else display_name
    if append_resource:
        result += ' Resource'
    return result
def get_resource_docname(resource, is_list):
    """Return the page name used for a resource's documentation.

    Derived from the class name (minus any "Resource" suffix), with a
    "-list" suffix for list resources that have a distinct plural name.
    """
    cls = resource if inspect.isclass(resource) else resource.__class__
    docname = uncamelcase(cls.__name__.replace('Resource', ''), '-')
    if is_list and resource.name != resource.name_plural:
        docname += '-list'
    return docname
def get_ref_to_doc(refname):
    """Return a pending cross-reference node pointing at *refname*.

    The 'hello' literal is placeholder content that Sphinx replaces
    with the target's actual title when resolving the reference —
    presumably; confirm against the Sphinx xref resolution docs.
    """
    xref = addnodes.pending_xref(reftype='ref', reftarget=refname,
                                 refexplicit=False, refdomain='std')
    xref += nodes.literal('hello', 'hello', classes=['xref'])
    return xref
def get_ref_to_resource(resource, is_list):
    """Return a cross-reference node linking to a resource's doc page."""
    docname = get_resource_docname(resource, is_list)
    return get_ref_to_doc('webapi2.0-%s-resource' % docname)
def get_ref_to_error(error):
    """Return a cross-reference node linking to an error's doc page."""
    refname = 'webapi2.0-error-%s' % error.code
    return get_ref_to_doc(refname)
def get_resource_uri_template(resource, include_child):
    """Return the URI template for a resource.

    This will go up the resource tree, building a URI based on the URIs
    of the parents. For a non-singleton, model-backed resource,
    *include_child* appends the ``{uri_object_key}/`` placeholder.
    """
    if resource.name == 'root':
        path = '/api/'
    else:
        if resource._parent_resource:
            path = get_resource_uri_template(resource._parent_resource, True)
        else:
            # Previously `path` was left unbound here, raising
            # UnboundLocalError for a parentless non-root resource.
            path = ''
        path += '%s/' % resource.uri_name
        if not resource.singleton and include_child and resource.model:
            path += '{%s}/' % resource.uri_object_key
    return path
def create_fake_resource_path(request, resource, child_keys, include_child):
    """Creates a fake path to a resource.

    This will go up the resource tree, building a URI based on the URIs
    of the parents and based on objects sitting in the database.

    Side effect: fills *child_keys* with uri_object_key -> value pairs
    taken from the first matching database object at each level, so
    callers can use them as queryset lookup kwargs.
    """
    if resource._parent_resource and resource._parent_resource.name != "root":
        path = create_fake_resource_path(request, resource._parent_resource,
                                         child_keys, True)
    else:
        path = '/api/'
    if resource.name != 'root':
        path += '%s/' % resource.uri_name
    if (not resource.singleton and
        include_child and
        resource.model and
        resource.uri_object_key):
        # Pull a real object from the database to fill in the URI key.
        q = resource.get_queryset(request, **child_keys)
        if q.count() == 0:
            logging.critical('Resource "%s" requires objects in the '
                             'database', resource.__class__)
            # Do the assert so it shows up in the logs.
            assert q.count() > 0
        obj = q[0]
        value = getattr(obj, resource.model_object_key)
        child_keys[resource.uri_object_key] = value
        path += '%s/' % value
    return path
def build_example(headers, data, mimetype):
    """Return a literal block showing an example payload, or None.

    JSON payloads are pretty-printed; other mimetypes are shown
    verbatim. The highlight language is looked up in MIMETYPE_LANGUAGES.
    """
    if not data:
        return None
    language = next((lang for base_mimetype, lang in MIMETYPE_LANGUAGES
                     if is_mimetype_a(mimetype, base_mimetype)), None)
    if language == 'javascript':
        payload = json.dumps(json.loads(data), sort_keys=True, indent=2)
    else:
        payload = data
    return nodes.literal_block(payload, payload, language=language,
                               classes=['example-payload'])
def fetch_response_data(response_class, mimetype, request=None, **kwargs):
    """Render a response for *mimetype* and return (headers, body).

    The rendered response is split at the first blank line, mirroring
    the HTTP header/body boundary.
    """
    request = request or DummyRequest()
    request.META['HTTP_ACCEPT'] = mimetype
    rendered = unicode(response_class(request, **kwargs))
    return rendered.split('\r\n\r\n', 1)
def setup(app):
    """Register the web API directives and cross-reference type with Sphinx."""
    app.add_directive('webapi-resource', ResourceDirective)
    app.add_directive('webapi-resource-tree', ResourceTreeDirective)
    app.add_directive('webapi-error', ErrorDirective)
    app.add_crossref_type('webapi2.0', 'webapi2.0', 'single: %s',
                          nodes.emphasis)
|
{
"content_hash": "cce2c79e5ab38604cde075e15373e87c",
"timestamp": "",
"source": "github",
"line_count": 994,
"max_line_length": 80,
"avg_line_length": 32.74849094567404,
"alnum_prop": 0.5615015974440895,
"repo_name": "1tush/reviewboard",
"id": "f7cde07ab8969e1fd6f6d3996c6cceb4a3decca4",
"size": "32552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/manual/_ext/webapidocs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "685"
},
{
"name": "C#",
"bytes": "340"
},
{
"name": "CSS",
"bytes": "157867"
},
{
"name": "Java",
"bytes": "340"
},
{
"name": "JavaScript",
"bytes": "1256833"
},
{
"name": "Objective-C",
"bytes": "288"
},
{
"name": "PHP",
"bytes": "278"
},
{
"name": "Perl",
"bytes": "103"
},
{
"name": "Python",
"bytes": "3124372"
},
{
"name": "Ruby",
"bytes": "172"
},
{
"name": "Shell",
"bytes": "963"
}
],
"symlink_target": ""
}
|
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class scparameter(base_resource) :
""" Configuration for SC parameter resource. """
	def __init__(self) :
		"""Initialize the SC parameter resource with empty field values."""
		# Backing fields for the sessionlife/vsr properties below.
		self._sessionlife = 0
		self._vsr = ""
	@property
	def sessionlife(self) :
		"""Time, in seconds, between the first time and the next time the SureConnect alternative content window is displayed. The alternative content window is displayed only once during a session for the same browser accessing a configured URL, so this parameter determines the length of a session.<br/>Default value: 300<br/>Minimum length = 1<br/>Maximum length = 0xFFFFFFFE.
		"""
		# Generated-SDK boilerplate: the try/except re-raises unchanged.
		try :
			return self._sessionlife
		except Exception as e:
			raise e
@sessionlife.setter
def sessionlife(self, sessionlife) :
"""Time, in seconds, between the first time and the next time the SureConnect alternative content window is displayed. The alternative content window is displayed only once during a session for the same browser accessing a configured URL, so this parameter determines the length of a session.<br/>Default value: 300<br/>Minimum length = 1<br/>Maximum length = 0xFFFFFFFE
"""
try :
self._sessionlife = sessionlife
except Exception as e:
raise e
@property
def vsr(self) :
"""File containing the customized response to be displayed when the ACTION in the SureConnect policy is set to NS.<br/>Default value: "DEFAULT"<br/>Minimum length = 1.
"""
try :
return self._vsr
except Exception as e:
raise e
@vsr.setter
def vsr(self, vsr) :
"""File containing the customized response to be displayed when the ACTION in the SureConnect policy is set to NS.<br/>Default value: "DEFAULT"<br/>Minimum length = 1
"""
try :
self._vsr = vsr
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(scparameter_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.scparameter
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
return None
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
""" Use this API to update scparameter.
"""
try :
if type(resource) is not list :
updateresource = scparameter()
updateresource.sessionlife = resource.sessionlife
updateresource.vsr = resource.vsr
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
""" Use this API to unset the properties of scparameter resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = scparameter()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the scparameter resources that are configured on netscaler.
"""
try :
if not name :
obj = scparameter()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class scparameter_response(base_response) :
	""" Response envelope for scparameter NITRO API calls. """
	def __init__(self, length=1) :
		# Scalar status fields of the NITRO response envelope.
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-populate the payload with `length` empty resources.
		self.scparameter = [scparameter() for _ in range(length)]
|
{
"content_hash": "ae1b8586a13dff3df4bb2a28cb7711a7",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 375,
"avg_line_length": 34.422764227642276,
"alnum_prop": 0.7175247992442135,
"repo_name": "mahabs/nitro",
"id": "d1819ec00ec77ceecb063ccf574e0576bbfc4756",
"size": "4848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/sc/scparameter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "10647176"
}
],
"symlink_target": ""
}
|
import mock
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance.domain import proxy
import glance.tests.utils as test_utils
# Canned identifiers for the tests in this module.
# NOTE(review): neither constant is referenced by the visible tests below --
# confirm they are still needed before removing.
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
class FakeProxy(object):
    """Minimal proxy double: remembers the wrapped object and the extra
    construction arguments it was given."""

    def __init__(self, base, *args, **kwargs):
        self.base, self.args, self.kwargs = base, args, kwargs
class FakeRepo(object):
    """Recording repo double: every operation captures its call arguments
    and returns a canned ``result``."""

    def __init__(self, result=None):
        self.args = None
        self.kwargs = None
        self.result = result

    def fake_method(self, *args, **kwargs):
        # Remember exactly how we were called, then return the canned value.
        self.args, self.kwargs = args, kwargs
        return self.result

    # Every repo operation shares the single recording implementation above.
    get = fake_method
    list = fake_method
    add = fake_method
    save = fake_method
    remove = fake_method
class TestProxyRepoPlain(test_utils.BaseTestCase):
    """Exercise proxy.Repo pass-through behavior without item wrapping."""

    def setUp(self):
        super(TestProxyRepoPlain, self).setUp()
        self.fake_repo = FakeRepo()
        self.proxy_repo = proxy.Repo(self.fake_repo)

    def _test_method(self, name, base_result, *args, **kwargs):
        """Call proxy method `name`; verify args and result pass through."""
        self.fake_repo.result = base_result
        method = getattr(self.proxy_repo, name)
        proxy_result = method(*args, **kwargs)
        self.assertEqual(base_result, proxy_result)
        self.assertEqual(args, self.fake_repo.args)
        self.assertEqual(kwargs, self.fake_repo.kwargs)

    def test_get(self):
        self._test_method('get', 'snarf', 'abcd')

    def test_list(self):
        self._test_method('list', ['sniff', 'snarf'], 2, filter='^sn')

    def test_add(self):
        self._test_method('add', 'snuff', 'enough')

    def test_save(self):
        self._test_method('save', 'snuff', 'enough', from_state=None)

    def test_remove(self):
        # Bug fix: this previously exercised 'add' instead of 'remove', so
        # proxy.Repo.remove() was never actually tested (it only passed by
        # accident because FakeRepo aliases every method to fake_method).
        self._test_method('remove', None, 'flying')
class TestProxyRepoWrapping(test_utils.BaseTestCase):
    """proxy.Repo tests with item wrapping enabled.
    The repo is built with ``item_proxy_class=FakeProxy`` and
    ``item_proxy_kwargs={'a': 1}``, so returned items should come back
    wrapped in a FakeProxy carrying those kwargs, and proxied arguments
    should be unwrapped to their ``base`` before reaching the fake repo.
    """
    def setUp(self):
        super(TestProxyRepoWrapping, self).setUp()
        self.fake_repo = FakeRepo()
        self.proxy_repo = proxy.Repo(self.fake_repo,
                                     item_proxy_class=FakeProxy,
                                     item_proxy_kwargs={'a': 1})
    # NOTE(review): this helper appears unused in this class (test_get and
    # test_list inline their own checks) -- confirm before removing.
    def _test_method(self, name, base_result, *args, **kwargs):
        self.fake_repo.result = base_result
        method = getattr(self.proxy_repo, name)
        proxy_result = method(*args, **kwargs)
        self.assertIsInstance(proxy_result, FakeProxy)
        self.assertEqual(base_result, proxy_result.base)
        self.assertEqual(0, len(proxy_result.args))
        self.assertEqual({'a': 1}, proxy_result.kwargs)
        self.assertEqual(args, self.fake_repo.args)
        self.assertEqual(kwargs, self.fake_repo.kwargs)
    def test_get(self):
        # Result of get() is wrapped; args pass through untouched.
        self.fake_repo.result = 'snarf'
        result = self.proxy_repo.get('some-id')
        self.assertIsInstance(result, FakeProxy)
        self.assertEqual(('some-id',), self.fake_repo.args)
        self.assertEqual({}, self.fake_repo.kwargs)
        self.assertEqual('snarf', result.base)
        self.assertEqual(tuple(), result.args)
        self.assertEqual({'a': 1}, result.kwargs)
    def test_list(self):
        # Every element of a list() result is wrapped individually.
        self.fake_repo.result = ['scratch', 'sniff']
        results = self.proxy_repo.list(2, prefix='s')
        self.assertEqual((2,), self.fake_repo.args)
        self.assertEqual({'prefix': 's'}, self.fake_repo.kwargs)
        self.assertEqual(2, len(results))
        for i in range(2):
            self.assertIsInstance(results[i], FakeProxy)
            self.assertEqual(self.fake_repo.result[i], results[i].base)
            self.assertEqual(tuple(), results[i].args)
            self.assertEqual({'a': 1}, results[i].kwargs)
    def _test_method_with_proxied_argument(self, name, result, **kwargs):
        # A proxied argument must be unwrapped to its base ('snoop') before
        # reaching the fake repo; a None result stays None (not wrapped).
        self.fake_repo.result = result
        item = FakeProxy('snoop')
        method = getattr(self.proxy_repo, name)
        proxy_result = method(item)
        self.assertEqual(('snoop',), self.fake_repo.args)
        self.assertEqual(kwargs, self.fake_repo.kwargs)
        if result is None:
            self.assertIsNone(proxy_result)
        else:
            self.assertIsInstance(proxy_result, FakeProxy)
            self.assertEqual(result, proxy_result.base)
            self.assertEqual(tuple(), proxy_result.args)
            self.assertEqual({'a': 1}, proxy_result.kwargs)
    def test_add(self):
        self._test_method_with_proxied_argument('add', 'dog')
    def test_add_with_no_result(self):
        self._test_method_with_proxied_argument('add', None)
    def test_save(self):
        self._test_method_with_proxied_argument('save', 'dog',
                                                from_state=None)
    def test_save_with_no_result(self):
        self._test_method_with_proxied_argument('save', None,
                                                from_state=None)
    def test_remove(self):
        self._test_method_with_proxied_argument('remove', 'dog')
    def test_remove_with_no_result(self):
        self._test_method_with_proxied_argument('remove', None)
class FakeImageFactory(object):
    """Image-factory double: records the kwargs passed to new_image and
    returns a canned ``result``."""

    def __init__(self, result=None):
        # Bug fix: the constructor previously ignored its `result` argument
        # (it always assigned None), making the parameter dead and forcing
        # callers to set `.result` afterwards. Now consistent with FakeImage.
        self.result = result
        self.kwargs = None

    def new_image(self, **kwargs):
        """Record the creation kwargs and return the canned result."""
        self.kwargs = kwargs
        return self.result
class TestImageFactory(test_utils.BaseTestCase):
    """Tests for proxy.ImageFactory pass-through and wrapping behavior."""

    def setUp(self):
        super(TestImageFactory, self).setUp()
        self.factory = FakeImageFactory()

    def test_proxy_plain(self):
        # Without a proxy class the factory result passes through untouched.
        plain_factory = proxy.ImageFactory(self.factory)
        self.factory.result = 'eddard'
        created = plain_factory.new_image(a=1, b='two')
        self.assertEqual('eddard', created)
        self.assertEqual({'a': 1, 'b': 'two'}, self.factory.kwargs)

    def test_proxy_wrapping(self):
        # With proxy_class the result comes back wrapped in that class,
        # carrying the configured proxy_kwargs.
        wrapping_factory = proxy.ImageFactory(self.factory,
                                              proxy_class=FakeProxy,
                                              proxy_kwargs={'dog': 'bark'})
        self.factory.result = 'stark'
        created = wrapping_factory.new_image(a=1, b='two')
        self.assertIsInstance(created, FakeProxy)
        self.assertEqual('stark', created.base)
        self.assertEqual({'a': 1, 'b': 'two'}, self.factory.kwargs)
class FakeImageMembershipFactory(object):
    """Membership-factory double: records the image and member id passed to
    new_image_member and returns a canned ``result``."""

    def __init__(self, result=None):
        # Bug fix: the constructor previously ignored its `result` argument
        # (it always assigned None), making the parameter dead and forcing
        # callers to set `.result` afterwards. Now consistent with FakeImage.
        self.result = result
        self.image = None
        self.member_id = None

    def new_image_member(self, image, member_id):
        """Record the arguments and return the canned result."""
        self.image = image
        self.member_id = member_id
        return self.result
class TestImageMembershipFactory(test_utils.BaseTestCase):
    """Tests for proxy.ImageMembershipFactory wrapping behavior."""
    def setUp(self):
        super(TestImageMembershipFactory, self).setUp()
        self.factory = FakeImageMembershipFactory()
    def test_proxy_plain(self):
        # No proxy classes: result and arguments pass straight through.
        proxy_factory = proxy.ImageMembershipFactory(self.factory)
        self.factory.result = 'tyrion'
        membership = proxy_factory.new_image_member('jaime', 'cersei')
        self.assertEqual('tyrion', membership)
        self.assertEqual('jaime', self.factory.image)
        self.assertEqual('cersei', self.factory.member_id)
    def test_proxy_wrapped_membership(self):
        # The returned membership is wrapped with proxy_class/proxy_kwargs.
        proxy_factory = proxy.ImageMembershipFactory(
            self.factory, proxy_class=FakeProxy, proxy_kwargs={'a': 1})
        self.factory.result = 'tyrion'
        membership = proxy_factory.new_image_member('jaime', 'cersei')
        self.assertIsInstance(membership, FakeProxy)
        self.assertEqual('tyrion', membership.base)
        self.assertEqual({'a': 1}, membership.kwargs)
        self.assertEqual('jaime', self.factory.image)
        self.assertEqual('cersei', self.factory.member_id)
    def test_proxy_wrapped_image(self):
        # An image argument that is itself a proxy instance is passed on
        # to the underlying factory still wrapped.
        proxy_factory = proxy.ImageMembershipFactory(
            self.factory, proxy_class=FakeProxy)
        self.factory.result = 'tyrion'
        image = FakeProxy('jaime')
        membership = proxy_factory.new_image_member(image, 'cersei')
        self.assertIsInstance(membership, FakeProxy)
        self.assertIsInstance(self.factory.image, FakeProxy)
        self.assertEqual('cersei', self.factory.member_id)
    def test_proxy_both_wrapped(self):
        # A subclass of the proxy class is also accepted and forwarded intact.
        class FakeProxy2(FakeProxy):
            pass
        proxy_factory = proxy.ImageMembershipFactory(
            self.factory,
            proxy_class=FakeProxy,
            proxy_kwargs={'b': 2})
        self.factory.result = 'tyrion'
        image = FakeProxy2('jaime')
        membership = proxy_factory.new_image_member(image, 'cersei')
        self.assertIsInstance(membership, FakeProxy)
        self.assertEqual('tyrion', membership.base)
        self.assertEqual({'b': 2}, membership.kwargs)
        self.assertIsInstance(self.factory.image, FakeProxy2)
        self.assertEqual('cersei', self.factory.member_id)
class FakeImage(object):
    """Trivial image stand-in that only stores a canned ``result`` value."""
    def __init__(self, result=None):
        self.result = result
class TestTaskFactory(test_utils.BaseTestCase):
    """Tests for proxy.TaskFactory delegation and wrapping."""
    def setUp(self):
        super(TestTaskFactory, self).setUp()
        # The underlying factory is a mock so delegation can be asserted.
        self.factory = mock.Mock()
        self.fake_type = 'import'
        self.fake_owner = "owner"
    def test_proxy_plain(self):
        # Without a task proxy class, new_task simply delegates.
        proxy_factory = proxy.TaskFactory(self.factory)
        proxy_factory.new_task(
            type=self.fake_type,
            owner=self.fake_owner
        )
        self.factory.new_task.assert_called_once_with(
            type=self.fake_type,
            owner=self.fake_owner
        )
    def test_proxy_wrapping(self):
        # With task_proxy_class, the delegated result is wrapped.
        proxy_factory = proxy.TaskFactory(
            self.factory,
            task_proxy_class=FakeProxy,
            task_proxy_kwargs={'dog': 'bark'})
        self.factory.new_task.return_value = 'fake_task'
        task = proxy_factory.new_task(
            type=self.fake_type,
            owner=self.fake_owner
        )
        self.factory.new_task.assert_called_once_with(
            type=self.fake_type,
            owner=self.fake_owner
        )
        self.assertIsInstance(task, FakeProxy)
        self.assertEqual('fake_task', task.base)
|
{
"content_hash": "8e0ebde0c4c5c97898ef345d5106d192",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 73,
"avg_line_length": 34.697916666666664,
"alnum_prop": 0.6148303812668868,
"repo_name": "klmitch/glance",
"id": "6979cab776fc31e9683a3e9b944de879b93367c5",
"size": "10657",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "glance/tests/unit/test_domain_proxy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4140950"
},
{
"name": "Shell",
"bytes": "7753"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: make ``Action.actor_object_id`` and
    ``Action.actor_content_type`` nullable.
    The ``models`` attribute below is South's frozen ORM snapshot taken at
    generation time -- it is data, not live model definitions; do not edit.
    """
    def forwards(self, orm):
        # Changing field 'Action.actor_object_id'
        db.alter_column('actstream_action', 'actor_object_id', self.gf('django.db.models.fields.CharField')(max_length=255, null=True))
        # Changing field 'Action.actor_content_type'
        db.alter_column('actstream_action', 'actor_content_type_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['contenttypes.ContentType']))
    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'Action.actor_object_id'
        raise RuntimeError("Cannot reverse this migration. 'Action.actor_object_id' and its values cannot be restored.")
        # User chose to not deal with backwards NULL issues for 'Action.actor_content_type'
        # NOTE(review): unreachable -- the raise above always fires first.
        raise RuntimeError("Cannot reverse this migration. 'Action.actor_content_type' and its values cannot be restored.")
    models = {
        'actstream.action': {
            'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
            'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'actor'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
            'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'actstream.follow': {
            'Meta': {'unique_together': "(('user', 'content_type', 'object_id'),)", 'object_name': 'Follow'},
            'actor_only': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'actstream.hiddenaction': {
            'Meta': {'object_name': 'HiddenAction'},
            'action': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hidden_by_user'", 'to': "orm['actstream.Action']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"})
        },
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 6, 5, 16, 54, 2, 221950)'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 6, 5, 16, 54, 2, 221774)'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['actstream']
|
{
"content_hash": "8947093550ac936f8cba67424826f7dc",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 204,
"avg_line_length": 74.55913978494624,
"alnum_prop": 0.5742717046437843,
"repo_name": "Eksmo/django-activity-stream",
"id": "380370c627c299aef2c2f000d3c6e3cae9884bcc",
"size": "6952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actstream/migrations/0007_auto__chg_field_action_actor_object_id__chg_field_action_actor_content.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "26101"
},
{
"name": "Python",
"bytes": "205988"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
import csv
import copy
import re
import json
import fnmatch
import hashlib
import itertools
import numpy as np
import numexpr as ne
import scipy.special
import os
import sys
from time import clock
from tqdm import tqdm
import contextlib
import formulate
import uproot
from collections import defaultdict
# Python 3 moved `reduce` to functools; fall back silently elsewhere.
# NOTE(review): the bare except is overly broad -- ImportError would suffice.
try:
    from functools import reduce
except:
    pass
import logging
logger = logging.getLogger(__name__)
# for regular expressions in:
# supercuts_to_branches
# expand_supercuts
# Matches the curly braces of format placeholders like "{0}".
strformat_chars = re.compile("[{}]")
# Matches placeholders temporarily encoded as VVV<name>VVV (see expand_supercuts).
supercutvar_chars = re.compile("VVV(.*?)VVV")
def format_arg_value(arg_val):
    """ Return a string representing a (name, value) pair.

    >>> format_arg_value(('x', (1, 2, 3)))
    'x=(1, 2, 3)'
    """
    name, value = arg_val
    return "{0}={1!r}".format(name, value)
# http://wordaligned.org/articles/echo
def echo(*echoargs, **echokwargs):
    """Decorator (factory) that reports every call to the wrapped function.

    Usable directly (``@echo``) or with options (``@echo(write=...)``).
    Each call is reported through ``write`` (default: ``sys.stdout.write``)
    as ``name(arg=value, ...)``.
    """
    logger.debug(echoargs)
    logger.debug(echokwargs)

    def echo_wrap(fn):
        """ Echo calls to a function.

        Returns a decorated version of the input function which "echoes" calls
        made to it by writing out the function's name and the arguments it was
        called with.
        """
        # Unpack function's arg count, arg names, arg defaults.
        # Bug fix: use the portable dunder attributes -- the legacy
        # fn.func_code / fn.func_defaults only exist on Python 2 and raised
        # AttributeError under Python 3 (this file declares a py3 transition).
        code = fn.__code__
        argcount = code.co_argcount
        argnames = code.co_varnames[:argcount]
        fn_defaults = fn.__defaults__ or tuple()
        argdefs = dict(zip(argnames[-len(fn_defaults):], fn_defaults))

        def wrapped(*v, **k):
            # Collect function arguments by chaining together positional,
            # defaulted, extra positional and keyword arguments.
            # Bug fix: materialize map() results -- under Python 3 map()
            # returns an iterator and the list concatenation below failed.
            positional = list(map(format_arg_value, zip(argnames, v)))
            defaulted = [
                format_arg_value((a, argdefs[a]))
                for a in argnames[len(v):]
                if a not in k
            ]
            nameless = list(map(repr, v[argcount:]))
            keyword = list(map(format_arg_value, k.items()))
            args = positional + defaulted + nameless + keyword
            write("%s(%s)\n" % (fn.__name__, ", ".join(args)))
            return fn(*v, **k)
        return wrapped

    write = echokwargs.get("write", sys.stdout.write)
    if len(echoargs) == 1 and callable(echoargs[0]):
        return echo_wrap(echoargs[0])
    return echo_wrap
# http://stackoverflow.com/a/31953563/1532974
# Do not allow duplicate log messages, such as inside loops
class DuplicateFilter(object):
    """Logging filter that passes each distinct record.msg only once."""

    def __init__(self):
        self.msgs = set()

    def filter(self, record):
        """Return True the first time a message is seen, False afterwards."""
        unseen = record.msg not in self.msgs
        self.msgs.add(record.msg)
        return unseen
# http://stackoverflow.com/a/38739634/1532974
class TqdmLoggingHandler(logging.Handler):
    # Routes log records through tqdm.write() so log lines do not garble
    # active tqdm progress bars.
    # NOTE(review): super(self.__class__, ...) breaks if this class is ever
    # subclassed; prefer super(TqdmLoggingHandler, self) -- confirm and fix.
    def __init__(self, level=logging.NOTSET):
        super(self.__class__, self).__init__(level)
    def emit(self, record):
        # Format and emit via tqdm; let KeyboardInterrupt/SystemExit
        # propagate, delegate everything else to logging's error handling.
        try:
            msg = self.format(record)
            tqdm.write(msg)
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
# for redirecting sys.stdout to tqdm
class DummyTqdmFile(object):
    """Dummy file-like that will write to tqdm"""
    # Underlying real stream (e.g. the original sys.stdout).
    file = None
    def __init__(self, file):
        self.file = file
    def write(self, x):
        # Avoid print() second call (useless \n)
        if len(x.rstrip()) > 0:
            tqdm.write(x, file=self.file)
    def flush(self):
        # tqdm.write handles flushing; nothing to do here.
        pass
@contextlib.contextmanager
def std_out_err_redirect_tqdm():
    """Temporarily route sys.stdout/sys.stderr through tqdm-safe writers.

    Yields the original stdout (useful as tqdm's ``file=`` target) and
    always restores both streams on exit.
    """
    orig_out_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = map(DummyTqdmFile, orig_out_err)
        yield orig_out_err[0]
    # Relay exceptions
    except Exception as exc:
        raise exc
    # Always restore sys.stdout/err if necessary
    finally:
        sys.stdout, sys.stderr = orig_out_err
# @echo(write=logger.debug)
def load_mass_windows(filename):
    """Parse a tab-separated file into ``{first_column: (col2, col3, col4)}``.

    Values are kept as strings exactly as read from the file.
    """
    with open(filename, "r") as handle:
        rows = csv.reader(handle, delimiter="\t")
        return {row[0]: tuple(row[1:4]) for row in rows}
# @echo(write=logger.debug)
did_regex = re.compile("(?:00)?([1-9]\d{5})(?=\.[a-zA-Z_]+\.?)")
def get_did(filename):
    """Extract the 6-digit dataset identifier (DID) from a file path.

    The parent directory's basename is checked first, then the file's own
    basename; if neither contains a DID, the file basename is returned
    unchanged (after logging a warning).
    """
    global did_regex
    dir_component = os.path.basename(os.path.dirname(filename))
    file_component = os.path.basename(filename)
    match = did_regex.search(dir_component) or did_regex.search(file_component)
    if match is None:
        logger.warning(
            "Can't figure out DID from dirname: {0:s}! Using the input basename instead: {1:s}".format(
                dir_component,
                file_component,
            )
        )
        return file_component
    return match.group(1)
# @echo(write=logger.debug)
def match_branch(branch, list_of_branches):
    """Return True when *branch* equals, or glob-matches, any entry of
    *list_of_branches*."""
    if branch in list_of_branches:
        return True
    return any(
        re.compile(fnmatch.translate(pattern)).search(branch)
        for pattern in list_of_branches
    )
# @echo(write=logger.debug)
def read_supercuts_file(filename):
    """Load supercut definitions from a JSON file.

    Raises KeyError when two supercuts share the same "selections" string.
    """
    logger.info("Reading supercuts file {0}".format(filename))
    logger.info("\tOpening")
    with open(filename) as f:
        supercuts = json.load(f)
    logger.info("\tLoaded")
    # Duplicate detection: removing each selection from the set raises
    # KeyError on the second occurrence of an identical string.
    selections = set([supercut["selections"] for supercut in supercuts])
    try:
        for supercut in supercuts:
            selections.remove(supercut["selections"])
    except KeyError:
        raise KeyError(
            "Found more than one supercut definition on {0}".format(
                supercut["selections"]
            )
        )
    # NOTE(review): format argument 0 (filename) is unused by this template.
    logger.info("\tFound {1:d} supercut definitions".format(filename, len(supercuts)))
    return supercuts
def significance(signalExp, backgroundExp, relativeBkgUncert):
""" Numpy/Scipy port of the RooStats function `BinomialExpZ'
See: https://root.cern.ch/doc/master/NumberCountingUtils_8cxx_source.html
"""
# pylint: disable=invalid-name
mainInf = signalExp + backgroundExp
tau = 1.0 / backgroundExp / (relativeBkgUncert * relativeBkgUncert)
auxiliaryInf = backgroundExp * tau
P_Bi = scipy.special.betainc(mainInf, auxiliaryInf + 1, 1.0 / (1.0 + tau))
return -scipy.special.ndtri(P_Bi)
# @echo(write=logger.debug)
def get_significance(
    signal, bkgd, insignificanceThreshold, bkgdUncertainty, bkgdStatUncertainty, rawBkgd
):
    """Compute the expected significance, or a negative sentinel when the
    yields are too small to trust.

    Returns:
        -1 when the signal yield is below the threshold,
        -2 when the background yield is below the threshold,
        -3 when the raw background count fails the statistical requirement
           sqrt(numBkgd)/numBkgd < bkgdStatUncertainty,
        otherwise the BinomialExpZ significance.
    """
    if signal < insignificanceThreshold:
        return -1
    if bkgd < insignificanceThreshold:
        return -2
    # require sqrt(numBkgd)/numBkgd < bkgdStatUncertainty
    if rawBkgd < 1 / (pow(bkgdStatUncertainty, 2)):
        return -3
    # otherwise, calculate!
    return significance(signal, bkgd, bkgdUncertainty)
# @echo(write=logger.debug)
def cut_to_selection(cut):
    """Substitute a cut's pivot values into its selection template."""
    return cut["selections"].format(*cut["pivot"])
# @echo(write=logger.debug)
def cuts_to_selection(cuts):
    """AND all cuts together as a product of parenthesized selections."""
    formatted = map(cut_to_selection, cuts)
    return "({})".format(")*(".join(formatted))
# @echo(write=logger.debug)
def extract_branch_names(string):
    """Parse a selection string and return the variable names it references."""
    # Accept both bytes and text: py3 str raises AttributeError on .decode(),
    # and an undecodable py2 str is used as-is.
    try:
        string = string.decode()
    except (UnicodeDecodeError, AttributeError):
        pass
    return formulate.from_auto(string).variables
def supercuts_to_branches(supercuts):
    """Collect every branch name referenced by the supercut selections.

    Curly braces of pivot placeholders are stripped before parsing so that
    format slots do not confuse the formula parser.
    """
    branches = set()
    for supercut in supercuts:
        cleaned = strformat_chars.sub('', supercut["selections"])
        branches.update(extract_branch_names(cleaned))
    return branches
# @echo(write=logger.debug)
def get_cut(superCuts, index=0):
    """Recursively generate every cut combination described by superCuts.

    Yields the same list object repeatedly, with each item's "pivot" and
    "fixed" keys mutated in place for the current combination -- callers
    must copy each yielded value before advancing the generator.
    """
    # reached bottom of iteration, yield what we've done
    if index >= len(superCuts):
        yield superCuts
    else:
        # start of iteration, make a copy of the input dictionary
        # if index == 0: superCuts = copy.deepcopy(superCuts)
        # reference to item
        item = superCuts[index]
        # are we doing a fixed cut? they should specify only pivot
        # EAFP cascade: first try "st3" ranges, then "list" values, and
        # finally treat the cut as fixed when neither key exists.
        try:
            # if they don't want a fixed cut, then they need start, stop, step in st3
            for pivot in itertools.product(*(np.arange(*st3) for st3 in item["st3"])):
                # set the pivot value
                item["pivot"] = pivot
                item["fixed"] = False
                # recursively call, yield the result which is the superCuts
                for cut in get_cut(superCuts, index + 1):
                    yield cut
        except KeyError:
            try:  # look for "list" key
                for pivot in itertools.product(*item["list"]):  # chiara: change here
                    # set the pivot value
                    item["pivot"] = pivot
                    item["fixed"] = False
                    # recursively call, yield the result which is the superCuts
                    for cut in get_cut(superCuts, index + 1):
                        yield cut
            except KeyError:  # if "st3" and "list" keys are not there, it's a fixed cut
                item["fixed"] = True
                for cut in get_cut(superCuts, index + 1):
                    yield cut
def get_n_cuts(supercuts):
    """Count the number of cut combinations the supercuts describe.

    Each (start, stop, step) triple in "st3" contributes
    ceil((stop - start) / step) values, each entry in "list" contributes
    its length, and fixed cuts contribute a factor of one.
    """
    total = 1
    for supercut in supercuts:
        if "st3" in supercut:
            range_sizes = (np.ceil((st3[1] - st3[0]) / st3[2]) for st3 in supercut["st3"])
            total *= reduce(lambda x, y: x * y, range_sizes)
        if "list" in supercut:  # if it's a list of values, just look at len()
            total *= reduce(lambda x, y: x * y, [len(values) for values in supercut["list"]])
    return total
# @echo(write=logger.debug)
def get_cut_hash(cut):
return hashlib.md5(
str([sorted(obj.items()) for obj in cut]).encode("utf-8")
).hexdigest()
# @echo(write=logger.debug)
def apply_cut(arr, cut):
    # Evaluate the cut's formatted selection with numexpr, using the event
    # arrays in `arr` as the variable namespace.
    return ne.evaluate(cut_to_selection(cut), local_dict=arr)
def expand_definition(component, aliases):
    """Recursively substitute alias definitions into a formulate expression.

    Returns an expression in which every variable naming an alias has been
    replaced by (a deep copy of) that alias's definition.
    """
    found_substitution = any(a for a in aliases.keys() if a in component.variables)
    if not found_substitution:
        # Nothing to substitute anywhere below: return an isolated copy.
        component = copy.deepcopy(component)
    elif isinstance(component, formulate.expression.SingleComponent):
        # A lone variable: swap in its alias definition when one exists.
        component = copy.deepcopy(aliases.get(str(component), component))
    else:
        # Composite expression: rewrite each argument, then re-expand in case
        # an alias expanded into something containing further aliases.
        # NOTE(review): mutates component._args in place; self-referential
        # aliases would recurse without bound -- confirm inputs are acyclic.
        component._args = [expand_definition(arg, aliases) for arg in component.args]
        component = expand_definition(component, aliases)
    return component
def expand_selection(selection, aliases):
    """Expand aliases in a selection string and render it back as numexpr."""
    return expand_definition(formulate.from_auto(selection), aliases).to_numexpr()
def expand_supercuts(supercuts, aliases):
    """Return a copy of supercuts with aliases expanded in each selection.

    Curly-brace pivot placeholders would confuse the formula parser, so each
    brace is first encoded as VVV (turning "{0}" into "VVV0VVV"), the
    selection is expanded, and the braces are restored afterwards.
    """
    supercuts = copy.deepcopy(supercuts)
    for supercut in supercuts:
        supercut["selections"] = supercutvar_chars.sub(
            r'{\1}',
            expand_selection(
                strformat_chars.sub('VVV', supercut["selections"]), aliases
            ),
        )
    return supercuts
# @echo(write=logger.debug)
def apply_cuts(events, cuts, eventWeightBranch):
    """Evaluate weight * selection over the event arrays.

    Returns (raw, weighted): the count of events with a non-zero product,
    and the sum of the weighted products, both as floats.
    """
    entireSelection = "{0:s}*{1:s}".format(eventWeightBranch, cuts_to_selection(cuts))
    events = ne.evaluate(entireSelection, local_dict=events)
    # events = tree[eventWeightBranch][reduce(np.bitwise_and, (apply_cut(tree, cut) for cut in cuts))]
    # count number of events that pass, not summing the weights since `events!=0` returns a boolean array
    return np.sum(events != 0).astype(float), np.sum(events).astype(float)
# @echo(write=logger.debug)
def do_cut(
    tree_name,
    files,
    supercuts,
    proposedBranches,
    output_directory,
    eventWeightBranch,
    position,
    hide_subtask,
):
    """Apply every supercut combination to the events of `tree_name` in
    `files`, writing per-cut raw/weighted pass counts as JSON to
    ``<output_directory>/<tree_name>.json``.

    Returns:
        (bool, float): success flag, and elapsed time in seconds.
    """
    if hide_subtask:
        position = -1

    start = clock()
    try:
        branches = []
        aliases = {}
        missingBranches = False
        # Resolve each proposed branch to a real tree branch or a tree alias.
        for fname in files:
            with uproot.open(fname) as f:
                tree = f[tree_name]
                for branch in proposedBranches:
                    if branch in tree:
                        branches.append(branch)
                    else:
                        if branch in tree.aliases:
                            aliases[branch.decode()] = formulate.from_auto(
                                tree.aliases[branch].decode()
                            )
                            branches.extend(extract_branch_names(tree.aliases[branch]))
                        else:
                            logger.error(
                                'branch {} not found in {} for {}'.format(
                                    branch, tree_name, fname
                                )
                            )
                            missingBranches |= True
        if missingBranches:
            # NOTE(review): SystemExit raised here is swallowed by the bare
            # `except` below, so this effectively returns (False, ...)
            # instead of terminating -- confirm which behavior is intended.
            sys.exit(1)

        # Fully expand nested aliases and collect the branches they need.
        for alias, alias_expr in aliases.items():
            alias_expr = expand_definition(alias_expr, aliases)
            branches.extend(extract_branch_names(alias_expr.to_numexpr()))
            aliases[alias] = alias_expr

        branches = set(branches)
        eventWeightBranch = expand_selection(eventWeightBranch, aliases)
        supercuts = expand_supercuts(supercuts, aliases)

        # iterate over the cuts available
        cuts = defaultdict(lambda: {'raw': 0, 'weighted': 0})

        events_tqdm = tqdm(
            total=uproot.numentries(files, tree_name),
            disable=(position == -1),
            position=2 * position + 1,
            leave=False,
            mininterval=5,
            maxinterval=10,
            unit="events",
            dynamic_ncols=True,
        )
        # Bug fix: the entry-range variables were previously named
        # `start`/`stop`, clobbering the `start = clock()` timestamp above,
        # so the elapsed time returned at the end was garbage
        # (clock() minus the last entry index).
        for file, entry_start, entry_stop, events in uproot.iterate(
            files,
            tree_name,
            branches=branches,
            namedecode='utf-8',
            reportfile=True,
            reportentries=True,
        ):
            events_tqdm.set_description(
                "({1:d}) Working on {0:s}".format(
                    tree_name.decode('utf-8'), 2 * position + 1
                )
            )
            for cut in tqdm(
                get_cut(copy.deepcopy(supercuts)),
                desc="({1:d}) Applying cuts to {0:s}".format(
                    file.name.decode('utf-8'), 2 * position + 2
                ),
                total=get_n_cuts(supercuts),
                disable=(position == -1),
                position=2 * position + 2,
                leave=False,
                unit="cuts",
                miniters=10,
                dynamic_ncols=True,
            ):
                cut_hash = get_cut_hash(cut)
                rawEvents, weightedEvents = apply_cuts(events, cut, eventWeightBranch)
                cuts[cut_hash]['raw'] += rawEvents
                cuts[cut_hash]['weighted'] += weightedEvents
            events_tqdm.update(entry_stop - entry_start)

        with open(
            "{0:s}/{1:s}.json".format(output_directory, tree_name.decode('utf-8')), "w+"
        ) as f:
            f.write(json.dumps(cuts, sort_keys=True, indent=4))
        result = True
    except:
        logger.exception(
            "Caught an error - skipping {0:s}".format(tree_name.decode('utf-8'))
        )
        result = False
    end = clock()
    return (result, end - start)
def extract_summary_items_from_name(interpretations, fmtstr, filename):
    """Map colon-separated interpretation names onto the regex captures
    extracted from *filename*.

    Example: ("mass:lifetime", r"(\d+)_(\d+)", "sig_100_3.json")
    -> {"mass": "100", "lifetime": "3"}.
    """
    match = re.search(fmtstr, filename)
    # Bug fix: the original zipped against the Match object itself, which is
    # not iterable and raised TypeError; the captured groups were intended
    # (mirroring fmt_pattern.search(...).groups() in get_summary).
    return dict(zip(interpretations.split(":"), match.groups()))
def get_summary(filename, interpretations, fmt_pattern, stop_masses=None):
    """Primarily used from within do_summary.

    Given a significance file, produce a summary dictionary for it.

    Args:
        filename: path to a JSON significance file whose first entry carries
            the cut hash, weighted significance and weighted yields.
        interpretations: iterable of names paired with the capture groups of
            ``fmt_pattern`` when matched against ``filename``.
        fmt_pattern: compiled regex whose groups identify the signal point.
        stop_masses: unused; kept for backward compatibility with existing
            callers (default changed from a shared mutable ``[]`` to ``None``).

    Returns:
        dict with the cut hash, significance, signal/background yields, their
        ratio (-1 when undefined) and the interpretation values.
    """
    logger.info("\treading {0:s}".format(filename))
    with open(filename) as f:
        entry = json.load(f)[0]
    cut_hash = entry["hash"]
    significance = entry["significance_weighted"]
    signal_yield = entry["yield_weighted"]["sig"]
    bkgd_yield = entry["yield_weighted"]["bkg"]
    # -1 is the sentinel for "ratio undefined".  Only guard the division
    # itself (previously a bare ``except`` silently swallowed every error).
    try:
        ratio = signal_yield / bkgd_yield
    except (ZeroDivisionError, TypeError):
        ratio = -1
    return {
        "hash": cut_hash,
        "significance": significance,
        "signal": signal_yield,
        "bkgd": bkgd_yield,
        "ratio": ratio,
        "filename": filename,
        **dict(zip(interpretations, fmt_pattern.search(filename).groups())),
    }
|
{
"content_hash": "8e378b5fb0280cb12f690bbb0bba5963",
"timestamp": "",
"source": "github",
"line_count": 518,
"max_line_length": 107,
"avg_line_length": 31.77992277992278,
"alnum_prop": 0.5821285384521929,
"repo_name": "kratsg/optimization",
"id": "933c5a9410c0332bf3039c54b38a6cfd0d4b66e4",
"size": "16509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/root_optimize/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1235"
},
{
"name": "HCL",
"bytes": "823"
},
{
"name": "Makefile",
"bytes": "651"
},
{
"name": "Python",
"bytes": "125640"
},
{
"name": "Shell",
"bytes": "3157"
}
],
"symlink_target": ""
}
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from six import string_types
from twitter.common.dirutil.fileset import Fileset
from pants.base.build_environment import get_buildroot
class FilesetWithSpec(object):
  """A set of files that remembers how it was produced.

  ``filespec`` records the globs or explicit file list this set came from;
  iteration and indexing delegate to the underlying result collection.
  """

  def __init__(self, rel_root, result, filespec):
    self.filespec = filespec
    self._rel_root = rel_root
    self._result = result

  def __iter__(self):
    return iter(self._result)

  def __getitem__(self, index):
    return self._result[index]
class FilesetRelPathWrapper(object):
  """Base for BUILD-file glob functions; resolves globs relative to the BUILD dir.

  Subclasses supply ``wrapped_fn`` (the underlying Fileset factory); calling an
  instance applies it under this BUILD file's directory and wraps the result in
  a FilesetWithSpec carrying the originating filespec.
  """

  def __init__(self, parse_context):
    self.rel_path = parse_context.rel_path

  def __call__(self, *args, **kwargs):
    target_root = os.path.join(get_buildroot(), self.rel_path)

    excludes = kwargs.pop('exclude', [])
    if isinstance(excludes, string_types):
      raise ValueError("Expected exclude parameter to be a list of globs, lists, or strings")
    # Normalize in place: a raw string can't be subtracted from a glob result,
    # so wrap each bare string in a single-element list.
    for idx, excluded in enumerate(excludes):
      if isinstance(excluded, string_types):
        excludes[idx] = [excluded]

    # Reject globs that escape the BUILD file's directory.
    for glob in args:
      if self._is_glob_dir_outside_root(glob, target_root):
        raise ValueError('Invalid glob {}, points outside BUILD file root dir {}'.format(glob, target_root))

    result = self.wrapped_fn(root=target_root, *args, **kwargs)
    for excluded in excludes:
      result -= excluded

    rel_root = os.path.relpath(target_root, get_buildroot())
    filespec = self.to_filespec(args, root=rel_root, excludes=excludes)
    return FilesetWithSpec(rel_root, result, filespec)

  def _is_glob_dir_outside_root(self, glob, root):
    # A well-formed glob still starts with the root after normalization;
    # anything else has walked out via '..' or an absolute path.
    glob_path = os.path.normpath(os.path.join(root, glob))
    return os.path.commonprefix([root, glob_path]) != root

  def to_filespec(self, args, root='', excludes=None):
    """Return a dict representation of this glob list, relative to the buildroot.

    Shape: ``{'globs': [...], 'exclude': [{'globs': [...]}, ...]}`` where the
    ``exclude`` key is only present when excludes were given.  The globs are in
    zglobs format.
    """
    filespec = {'globs' : [os.path.join(root, arg) for arg in args]}
    if excludes:
      exclude_specs = []
      for exclude in excludes:
        if hasattr(exclude, 'filespec'):
          # Already a FilesetWithSpec -- reuse its recorded spec.
          exclude_specs.append(exclude.filespec)
        else:
          exclude_specs.append({'globs' : [os.path.join(root, x) for x in exclude]})
      filespec['exclude'] = exclude_specs
    return filespec
class Globs(FilesetRelPathWrapper):
  """Returns Fileset containing matching files in same directory as this BUILD file.

  E.g., ``sources = globs('*java'),`` to get .java files in this directory.

  :param exclude: a list of {,r,z}globs objects, strings, or lists of
    strings to exclude. E.g. ``globs('*',exclude=[globs('*.java'),
    'foo.py'])`` gives all files in this directory except ``.java``
    files and ``foo.py``.

  Deprecated:
  You might see that old code uses "math" on the return value of
  ``globs()``. E.g., ``globs('*') - globs('*.java')`` gives all files
  in this directory *except* ``.java`` files. Please use exclude
  instead, since pants is moving to make BUILD files easier to parse,
  and the new grammar will not support arithmetic.
  """
  # Non-recursive globbing: FilesetRelPathWrapper.__call__ invokes this under
  # the BUILD file's directory.
  wrapped_fn = Fileset.globs
class RGlobs(FilesetRelPathWrapper):
  """Recursive ``globs``, returns Fileset matching files in this directory and its descendents.

  E.g., ``bundle(fileset=rglobs('config/*')),`` to bundle up all files in
  the config, config/foo, config/foo/bar directories.

  :param exclude: a list of {,r,z}globs objects, strings, or lists of
    strings to exclude. E.g. ``rglobs('config/*',exclude=[globs('config/*.java'),
    'config/foo.py'])`` gives all files under config except ``.java`` files and ``config/foo.py``.

  Deprecated:
  You might see that old code uses "math" on the return value of ``rglobs()``. E.g.,
  ``rglobs('config/*') - rglobs('config/foo/*')`` gives all files under `config` *except*
  those in ``config/foo``. Please use exclude instead, since pants is moving to
  make BUILD files easier to parse, and the new grammar will not support arithmetic.
  """

  @staticmethod
  def rglobs_following_symlinked_dirs_by_default(*globspecs, **kw):
    # Follow symlinked directories unless the caller explicitly opted out.
    if 'follow_links' not in kw:
      kw['follow_links'] = True
    return Fileset.rglobs(*globspecs, **kw)

  wrapped_fn = rglobs_following_symlinked_dirs_by_default

  def to_filespec(self, args, root='', excludes=None):
    """Convert rglob patterns into zglob-style patterns, then delegate to the base class."""
    # In rglobs, * at the beginning of a path component means "any
    # number of directories, including 0". Unfortunately, "**" in
    # some other systems, e.g. git means "one or more directories".
    # So every time we see ^* or **, we need to output both
    # "**/whatever" and "whatever".
    rglobs = []
    for arg in args:
      components = arg.split(os.path.sep)
      out = []
      # First pass: rewrite components so leading '*' also covers the
      # zero-directory case, collapsing redundant consecutive '**'.
      for component in components:
        if component == '**':
          if out and out[-1].startswith("**"):
            continue
          out.append(component)
        elif component[0] == '*':
          if out and out[-1].startswith("**"):
            # We want to translate **/*.py to **/*.py, not **/**/*.py
            out.append(component)
          else:
            out.append('**/' + component)
        else:
          out.append(component)
      # Second pass: expand each '**/' prefix into both the "with
      # directories" and "without directories" alternatives, recursively.
      def rglob_path(beginning, rest):
        if not rest:
          return [beginning]
        endings = []
        for i, item in enumerate(rest):
          if beginning and not beginning.endswith(os.path.sep):
            beginning += os.path.sep
          if item.startswith('**'):
            # .../**/*.java
            for ending in rglob_path(beginning + item, rest[i+1:]):
              endings.append(ending)
            # .../*.java
            for ending in rglob_path(beginning + item[3:], rest[i+1:]):
              endings.append(ending)
            return endings
          else:
            if beginning and not beginning.endswith(os.path.sep):
              beginning += os.path.sep + item
            else:
              beginning += item
        return [beginning]
      rglobs.extend(rglob_path('', out))
    return super(RGlobs, self).to_filespec(rglobs, root=root, excludes=excludes)
class ZGlobs(FilesetRelPathWrapper):
  """Returns a FilesetWithSpec that matches zsh-style globs, including ``**/`` for recursive globbing.

  Uses ``BUILD`` file's directory as the "working directory".
  """

  @staticmethod
  def zglobs_following_symlinked_dirs_by_default(*globspecs, **kw):
    # Follow symlinked directories unless the caller explicitly opted out.
    kw.setdefault('follow_links', True)
    return Fileset.zglobs(*globspecs, **kw)

  wrapped_fn = zglobs_following_symlinked_dirs_by_default
|
{
"content_hash": "031860b3f53d003e9e8e26936704ac6f",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 102,
"avg_line_length": 36.78865979381443,
"alnum_prop": 0.6361216197281772,
"repo_name": "digwanderlust/pants",
"id": "37b056bd09a2ceee5d445f302c2a08798704701a",
"size": "7137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/core/wrapped_globs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11139"
},
{
"name": "GAP",
"bytes": "4818"
},
{
"name": "HTML",
"bytes": "68162"
},
{
"name": "Java",
"bytes": "310901"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "7038"
},
{
"name": "Python",
"bytes": "3049918"
},
{
"name": "Scala",
"bytes": "77693"
},
{
"name": "Shell",
"bytes": "47201"
},
{
"name": "Thrift",
"bytes": "2824"
}
],
"symlink_target": ""
}
|
import requests
import time
from testutils import prefix, api_v0
test_user = 'test_user'
@prefix('test_v0_create_team')
def test_api_v0_create_team(team):
    """Creating a team with a scheduling timezone returns 201."""
    team_name = "v0_create_team_team"
    # Remove any leftover team from a previous run so the POST is a true create.
    requests.delete(api_v0('teams/'+team_name))
    resp = requests.post(api_v0('teams'), json={"name": team_name, 'scheduling_timezone': 'utc'})
    assert resp.status_code == 201
    # Add to team fixture to ensure cleanup
    team.mark_for_cleaning(team_name)
@prefix('test_v0_create_team_with_space')
def test_api_v0_create_team_with_space(team):
    """Leading/trailing whitespace in a team name is stripped on creation."""
    team_name = "v0_create_team_team"
    team_name_with_space = " v0_create_team_team "
    # Clean slate: the stripped name must not already exist.
    requests.delete(api_v0('teams/'+team_name))
    requests.post(api_v0('teams'), json={"name": team_name_with_space, 'scheduling_timezone': 'utc'})
    # The stored name is the stripped one; the padded variant never appears.
    teams = requests.get(api_v0('teams')).json()
    assert team_name in teams
    assert team_name_with_space not in teams
    # Add to team fixture to ensure cleanup
    team.mark_for_cleaning(team_name)
@prefix('test_v0_invalid_team')
def test_api_v0_create_invalid_team(team):
    """Team names with URL-unsafe characters are rejected with 400 on create and rename."""
    invalid_name = "v0_create_<inv@lid/_team"
    # Creating a team with an invalid name fails outright.
    resp = requests.post(api_v0('teams'),
                         json={"name": invalid_name, 'scheduling_timezone': 'utc'})
    assert resp.status_code == 400
    # Renaming an existing team to an invalid name fails too.
    team_name = team.create()
    resp = requests.put(api_v0('teams/%s' % team_name),
                        json={"name": invalid_name})
    assert resp.status_code == 400
@prefix('test_v0_get_teams')
def test_api_v0_get_teams(team):
    """GET /teams returns a list that includes a freshly created team."""
    team_name = team.create()
    resp = requests.get(api_v0('teams'))
    assert resp.status_code == 200
    teams = resp.json()
    assert isinstance(teams, list)
    assert len(teams) >= 1
    assert team_name in teams
@prefix('test_v0_get_team')
def test_api_v0_get_team(team, role, roster, schedule):
    """GET /teams/<name> returns the full field set by default and honors ?fields=."""
    team_name = team.create()
    role_name = role.create()
    roster_name = roster.create(team_name)
    schedule.create(team_name, roster_name, {'role': role_name,
                                             'events': [{'start': 0, 'duration': 60*60*24*7}],
                                             'advanced_mode': 0})
    # by default, it should return everything
    resp = requests.get(api_v0('teams/'+team_name))
    assert resp.status_code == 200
    # (renamed local: the original rebound the `team` fixture name here)
    team_data = resp.json()
    assert isinstance(team_data, dict)
    expected_set = {'users', 'admins', 'services', 'rosters', 'name', 'id', 'slack_channel', 'slack_channel_notifications', 'email',
                    'scheduling_timezone', 'iris_plan', 'iris_enabled', 'override_phone_number'}
    assert expected_set == set(team_data.keys())
    # it should also support filter by fields
    resp = requests.get(api_v0('teams/%s?fields=users&fields=services&fields=admins' % team_name))
    assert resp.status_code == 200
    team_data = resp.json()
    assert isinstance(team_data, dict)
    expected_set = {'users', 'admins', 'services', 'name', 'id', 'slack_channel', 'slack_channel_notifications', 'email',
                    'scheduling_timezone', 'iris_plan', 'iris_enabled', 'override_phone_number'}
    assert expected_set == set(team_data.keys())
@prefix('test_v0_delete_team')
def test_api_v0_delete_team(team):
    """Deleting a team makes subsequent GETs return 404."""
    team_name = team.create()
    # NOTE(review): this POST duplicates the fixture's create (and omits
    # scheduling_timezone); its unchecked response is irrelevant to the test.
    requests.post(api_v0('teams'), json={"name": team_name})
    resp = requests.delete(api_v0('teams/'+team_name))
    assert resp.status_code == 200
    resp = requests.get(api_v0('teams/'+team_name))
    assert resp.status_code == 404
@prefix('test_v0_update_team')
def test_api_v0_update_team(team):
    """PUT /teams/<name> renames a team and updates its contact settings."""
    team_name = team.create()
    new_team_name = "new-moninfra-update"
    email = 'abc@gmail.com'
    slack = '#slack'
    slack_notifications = '#slack-alerts'
    override_num = '1234'
    # setup DB state: new name free, old name present
    requests.delete(api_v0('teams/'+new_team_name))
    resp = requests.get(api_v0('teams/'+new_team_name))
    assert resp.status_code == 404
    resp = requests.get(api_v0('teams/'+team_name))
    assert resp.status_code == 200
    # edit team name/email/slack
    resp = requests.put(api_v0('teams/'+team_name), json={'name': new_team_name,
                                                          'email': email,
                                                          'slack_channel': slack,
                                                          'slack_channel_notifications': slack_notifications,
                                                          'override_phone_number': override_num})
    assert resp.status_code == 200
    team.mark_for_cleaning(new_team_name)
    # verify result: the old name is gone and the new name carries the settings
    resp = requests.get(api_v0('teams/'+team_name))
    assert resp.status_code == 404
    resp = requests.get(api_v0('teams/'+new_team_name))
    assert resp.status_code == 200
    team_data = resp.json()
    assert team_data['email'] == email
    assert team_data['slack_channel'] == slack
    assert team_data['slack_channel_notifications'] == slack_notifications
    assert team_data['override_phone_number'] == override_num
@prefix('test_v0_team_admin')
def test_api_v0_team_admin(team, user):
    """Admins can be added/removed; adding an admin also joins them to the team."""
    team_name = team.create()
    # Consistency fix: format the path before calling api_v0, like the rest of
    # this module.  The old code applied '%' to api_v0's return value, which
    # only worked because api_v0 passed the '%s' through unchanged.
    re = requests.get(api_v0('teams/%s/admins' % team_name))
    assert re.status_code == 200
    # Make sure the test user was made an admin after making the team
    assert len(re.json()) == 1
    admin_user = user.create()
    # test create admin
    re = requests.post(api_v0('teams/%s/admins' % team_name),
                       json={'name': admin_user})
    assert re.status_code == 201
    # verify result
    re = requests.get(api_v0('teams/%s/admins' % team_name))
    assert re.status_code == 200
    assert admin_user in set(re.json())
    # user should be also added to team automatically
    re = requests.get(api_v0('teams/%s' % team_name))
    assert re.status_code == 200
    assert admin_user in re.json()['users']
    # test delete admin (robustness: the delete's status was previously unchecked)
    re = requests.delete(api_v0('teams/%s/admins/%s' % (team_name, admin_user)))
    assert re.status_code == 200
    # verify result
    re = requests.get(api_v0('teams/%s/admins' % team_name))
    assert re.status_code == 200
    assert admin_user not in set(re.json())
@prefix('test_v0_team_members')
def test_api_v0_team_members(team, user, roster):
    """Team membership is the union of direct members, admins and roster members."""
    team_name = team.create()
    roster_name = roster.create(team_name)
    user_name = user.create()
    user_name_2 = user.create()
    user_name_3 = user.create()
    none_exist_user = 'team_users_test_random1231_user'
    # make sure we start with an empty team (just the creating test user)
    re = requests.get(api_v0('teams/%s/users' % team_name))
    assert re.status_code == 200
    users = re.json()
    assert isinstance(users, list)
    assert len(users) == 1
    # test add invalid user to the team.  Consistency fix: format the path
    # before calling api_v0 (the old code applied '%' to api_v0's result).
    re = requests.post(api_v0('teams/%s/users' % team_name), json={'name': none_exist_user})
    assert re.status_code == 422
    # Bug fix: this comparison was missing its `assert` and silently did nothing.
    assert re.json() == {
        'title': 'IntegrityError',
        'description': 'user %s not found' % none_exist_user
    }
    # test add user to team
    re = requests.post(api_v0('teams/%s/users' % team_name), json={'name': user_name})
    assert re.status_code == 201
    # verify team members
    re = requests.get(api_v0('teams/%s/users' % team_name))
    assert re.status_code == 200
    users = re.json()
    assert isinstance(users, list)
    assert set(users) == set([user_name, test_user])
    # test duplicate user creation
    re = requests.post(api_v0('teams/%s/users' % team_name), json={'name': user_name})
    assert re.status_code == 422
    assert re.json() == {
        'title': 'IntegrityError',
        'description': 'user name "%s" is already in team %s' % (user_name, team_name)
    }
    # test delete user from team
    re = requests.delete(api_v0('teams/%s/users/%s' % (team_name, user_name)))
    assert re.status_code == 200
    re = requests.get(api_v0('teams/%s/users' % team_name))
    assert re.status_code == 200
    users = re.json()
    assert isinstance(users, list)
    assert set(users) == set([test_user])
    # test create admin: admins are implicitly team members
    re = requests.post(api_v0('teams/%s/admins' % team_name),
                       json={'name': user_name_2})
    assert re.status_code == 201
    re = requests.get(api_v0('teams/%s/users' % team_name))
    assert re.status_code == 200
    users = re.json()
    assert isinstance(users, list)
    assert set(users) == set([test_user, user_name_2])
    # test add user to roster: roster members are implicitly team members
    re = requests.post(api_v0('teams/%s/rosters/%s/users' % (team_name, roster_name)),
                       json={'name': user_name_3})
    assert re.status_code == 201
    re = requests.post(api_v0('teams/%s/rosters/%s/users' % (team_name, roster_name)),
                       json={'name': user_name_2})
    assert re.status_code == 201
    re = requests.get(api_v0('teams/%s/users' % team_name))
    assert re.status_code == 200
    users = re.json()
    assert isinstance(users, list)
    assert set(users) == set([test_user, user_name_2, user_name_3])
    # delete admin/roster-member from team admins, check that they're not removed from team
    re = requests.post(api_v0('teams/%s/admins' % team_name),
                       json={'name': user_name_3})
    assert re.status_code == 201
    re = requests.delete(api_v0('teams/%s/admins/%s' % (team_name, user_name_3)))
    assert re.status_code == 200
    re = requests.get(api_v0('teams/%s/users' % team_name))
    assert re.status_code == 200
    users = re.json()
    assert isinstance(users, list)
    assert set(users) == set([test_user, user_name_2, user_name_3])
    # delete from roster too, check they're removed
    re = requests.delete(
        api_v0('teams/%s/rosters/%s/users/%s' % (team_name, roster_name, user_name_3)))
    assert re.status_code == 200
    re = requests.get(api_v0('teams/%s/users' % team_name))
    assert re.status_code == 200
    users = re.json()
    assert isinstance(users, list)
    assert set(users) == set([test_user, user_name_2])
    # make sure roster but no admin stays in team
    re = requests.delete(api_v0('teams/%s/admins/%s' % (team_name, user_name_2)))
    assert re.status_code == 200
    re = requests.get(api_v0('teams/%s/users' % team_name))
    assert re.status_code == 200
    users = re.json()
    assert isinstance(users, list)
    assert set(users) == set([test_user, user_name_2])
    # delete from roster too, check that they're removed
    re = requests.delete(
        api_v0('teams/%s/rosters/%s/users/%s' % (team_name, roster_name, user_name_2)))
    assert re.status_code == 200
    re = requests.get(api_v0('teams/%s/users' % team_name))
    assert re.status_code == 200
    users = re.json()
    assert isinstance(users, list)
    assert set(users) == set([test_user])
@prefix('test_v0_summary')
def test_api_v0_team_summary(team, user, role, event):
    """/summary reports the current and next on-call event for each role."""
    team_name = team.create()
    user_name = user.create()
    user_name_2 = user.create()
    role_name = role.create()
    role_name_2 = role.create()
    user.add_to_team(user_name, team_name)
    user.add_to_team(user_name_2, team_name)
    start, end = int(time.time()), int(time.time()+36000)

    def _event(offset, event_user, event_role):
        # Same key order as the original literals, so the JSON payload matches.
        return {'start': start + offset,
                'end': end + offset,
                'user': event_user,
                'team': team_name,
                'role': event_role}

    event_data_1 = _event(0, user_name, role_name)
    event_data_2 = _event(-5, user_name_2, role_name_2)
    event_data_3 = _event(50000, user_name, role_name)
    event_data_4 = _event(50005, user_name_2, role_name_2)
    event_data_5 = _event(50001, user_name, role_name)
    # Create current events
    event.create(event_data_1)
    event.create(event_data_2)
    # Create next events
    event.create(event_data_3)
    event.create(event_data_4)
    # Create extra future event that isn't the next event
    event.create(event_data_5)
    resp = requests.get(api_v0('teams/%s/summary' % team_name))
    assert resp.status_code == 200
    results = resp.json()
    keys = ['start', 'end', 'role', 'user']
    assert all(results['current'][role_name][0][key] == event_data_1[key] for key in keys)
    assert all(results['current'][role_name_2][0][key] == event_data_2[key] for key in keys)
    assert all(results['next'][role_name][0][key] == event_data_3[key] for key in keys)
    assert all(results['next'][role_name_2][0][key] == event_data_4[key] for key in keys)
@prefix('test_v0_summary')
def test_api_v0_non_exist_team_summary(team, user, role, event):
    """Summary for a team that does not exist returns 404."""
    resp = requests.get(api_v0('teams/fobar123bac-n-o-t-found/summary'))
    assert resp.status_code == 404
@prefix('test_v0_team_oncall')
def test_api_v0_team_current_oncall(team, user, role, event):
    """/oncall and /oncall/<role> return the currently active events."""
    team_name = team.create()
    user_name = user.create()
    user_name_2 = user.create()
    role_name = role.create()
    role_name_2 = role.create()
    user.add_to_team(user_name, team_name)
    user.add_to_team(user_name_2, team_name)
    start, end = int(time.time()), int(time.time()+36000)
    # Two active events, one per role (same key order as the original literals).
    event.create({'start': start,
                  'end': end,
                  'user': user_name,
                  'team': team_name,
                  'role': role_name})
    event.create({'start': start - 5,
                  'end': end - 5,
                  'user': user_name_2,
                  'team': team_name,
                  'role': role_name_2})
    # Scoped to one role: exactly the matching event comes back.
    resp = requests.get(api_v0('teams/%s/oncall/%s' % (team_name, role_name)))
    assert resp.status_code == 200
    oncall = resp.json()
    assert oncall[0]['start'] == start
    assert oncall[0]['end'] == end
    # Unscoped: events for both roles come back.
    resp = requests.get(api_v0('teams/%s/oncall' % team_name))
    assert resp.status_code == 200
    assert len(resp.json()) == 2
@prefix('test_v0_team_override_number')
def test_api_v0_team_override_number(team, user, role, event):
    """When a team sets override_phone_number, on-call queries report it as the 'call' contact."""
    team_name = team.create()
    user_name = user.create()
    user_name_2 = user.create()
    user.add_to_team(user_name, team_name)
    user.add_to_team(user_name_2, team_name)
    start, end = int(time.time()), int(time.time()+36000)
    event_data_1 = {'start': start,
                    'end': end,
                    'user': user_name,
                    'team': team_name,
                    'role': 'primary'}
    event.create(event_data_1)
    override_num = '12345'
    # NOTE(review): the PUT's status code is never asserted; a failure here
    # would surface confusingly in the assertions below.
    re = requests.put(api_v0('teams/'+team_name), json={'override_phone_number': override_num})
    re = requests.get(api_v0('teams/%s/oncall/%s' % (team_name, 'primary')))
    assert re.status_code == 200
    results = re.json()
    assert results[0]['start'] == start
    assert results[0]['end'] == end
    assert results[0]['contacts']['call'] == override_num
    re = requests.get(api_v0('teams/%s/oncall' % team_name))
    assert re.status_code == 200
    results = re.json()
    assert results[0]['contacts']['call'] == override_num
    # NOTE(review): `results` is NOT refreshed from this summary response, so
    # the final assert re-checks the previous /oncall payload.  This looks like
    # a bug -- it presumably meant to parse re.json() here; confirm intent
    # before changing, since the summary payload shape differs from /oncall.
    re = requests.get(api_v0('teams/%s/summary' % team_name))
    assert results[0]['contacts']['call'] == override_num
|
{
"content_hash": "b9678302e8661484db32b247c7672958",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 132,
"avg_line_length": 37.41951219512195,
"alnum_prop": 0.5904054230217703,
"repo_name": "diegocepedaw/oncall",
"id": "85c5b97b7e91ff27fdd7fb82ae850b20d03bfb0d",
"size": "15525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "e2e/test_teams.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "58228"
},
{
"name": "Dockerfile",
"bytes": "1343"
},
{
"name": "HTML",
"bytes": "111406"
},
{
"name": "JavaScript",
"bytes": "456677"
},
{
"name": "Makefile",
"bytes": "400"
},
{
"name": "Procfile",
"bytes": "183"
},
{
"name": "Python",
"bytes": "496637"
},
{
"name": "Shell",
"bytes": "1558"
},
{
"name": "Smarty",
"bytes": "704"
}
],
"symlink_target": ""
}
|
from ryu.services.protocols.bgp.signals import SignalBus
class BgpSignalBus(SignalBus):
    """SignalBus specialized with the BGP-related signal identifiers.

    Each helper method wraps ``emit_signal`` with the matching signal tuple.
    """

    BGP_ERROR = ('error', 'bgp')
    BGP_DEST_CHANGED = ('core', 'dest', 'changed')
    BGP_VRF_REMOVED = ('core', 'vrf', 'removed')
    BGP_VRF_ADDED = ('core', 'vrf', 'added')
    BGP_NOTIFICATION_RECEIVED = ('bgp', 'notification_received')
    BGP_NOTIFICATION_SENT = ('bgp', 'notification_sent')
    BGP_VRF_STATS_CONFIG_CHANGED = (
        'core', 'vrf', 'config', 'stats', 'changed'
    )

    def bgp_error(self, peer, code, subcode, reason):
        """Emit a per-peer BGP error signal carrying code/subcode/reason."""
        detail = {'code': code, 'subcode': subcode, 'reason': reason, 'peer': peer}
        return self.emit_signal(self.BGP_ERROR + (peer,), detail)

    def bgp_notification_received(self, peer, notification):
        """Emit a signal for a NOTIFICATION message received from *peer*."""
        return self.emit_signal(self.BGP_NOTIFICATION_RECEIVED + (peer,), notification)

    def bgp_notification_sent(self, peer, notification):
        """Emit a signal for a NOTIFICATION message sent to *peer*."""
        return self.emit_signal(self.BGP_NOTIFICATION_SENT + (peer,), notification)

    def dest_changed(self, dest):
        """Emit a signal that a destination changed."""
        return self.emit_signal(self.BGP_DEST_CHANGED, dest)

    def vrf_removed(self, route_dist):
        """Emit a signal that the VRF for *route_dist* was removed."""
        return self.emit_signal(self.BGP_VRF_REMOVED, route_dist)

    def vrf_added(self, vrf_conf):
        """Emit a signal that a VRF was added."""
        return self.emit_signal(self.BGP_VRF_ADDED, vrf_conf)

    def stats_config_changed(self, vrf_conf):
        """Emit a signal that a VRF's stats configuration changed."""
        return self.emit_signal(self.BGP_VRF_STATS_CONFIG_CHANGED, vrf_conf)
|
{
"content_hash": "9d1a0e2cc6a05cc6627a8c5c5965e895",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 78,
"avg_line_length": 29.472727272727273,
"alnum_prop": 0.5638494756323257,
"repo_name": "ntts-clo/ryu",
"id": "7f41c93f50406dc504e786e2adef39179169fc4d",
"size": "1621",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ryu/services/protocols/bgp/signals/emit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Erlang",
"bytes": "870216"
},
{
"name": "Python",
"bytes": "4304226"
},
{
"name": "Shell",
"bytes": "14336"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
from django.db import transaction, models, router
from django.utils import timezone
from reversion.models import Revision, Version
from reversion.management.commands import BaseRevisionCommand
class Command(BaseRevisionCommand):
    """Management command that deletes stale django-reversion revisions for the given apps/models."""

    help = "Deletes revisions for a given app [and model]."

    def add_arguments(self, parser):
        super().add_arguments(parser)
        parser.add_argument(
            "--days",
            default=0,
            type=int,
            help="Delete only revisions older than the specified number of days.",
        )
        parser.add_argument(
            "--keep",
            default=0,
            type=int,
            help="Keep the specified number of revisions (most recent) for each object.",
        )

    def handle(self, *app_labels, **options):
        # Unpack command-line options.
        verbosity = options["verbosity"]
        using = options["using"]
        model_db = options["model_db"]
        days = options["days"]
        keep = options["keep"]
        # Delete revisions.
        using = using or router.db_for_write(Revision)
        with transaction.atomic(using=using):
            revision_query = models.Q()
            keep_revision_ids = set()
            # By default, delete nothing.
            can_delete = False
            # Get all revisions for the given revision manager and model.
            for model in self.get_models(options):
                if verbosity >= 1:
                    self.stdout.write("Finding stale revisions for {name}".format(
                        name=model._meta.verbose_name,
                    ))
                # Find all matching revision IDs.
                model_query = Version.objects.using(using).get_for_model(
                    model,
                    model_db=model_db,
                )
                if keep:
                    # Object ids that have more than `keep` versions recorded.
                    overflow_object_ids = list(Version.objects.using(using).get_for_model(
                        model,
                        model_db=model_db,
                    ).order_by().values_list("object_id").annotate(
                        count=models.Count("object_id"),
                    ).filter(
                        count__gt=keep,
                    ).values_list("object_id", flat=True).iterator())
                    # Only delete overflow revisions.
                    model_query = model_query.filter(object_id__in=overflow_object_ids)
                    for object_id in overflow_object_ids:
                        if verbosity >= 2:
                            self.stdout.write("- Finding stale revisions for {name} #{object_id}".format(
                                name=model._meta.verbose_name,
                                object_id=object_id,
                            ))
                        # But keep the underflow revisions (the `keep` most recent per object).
                        keep_revision_ids.update(Version.objects.using(using).get_for_object_reference(
                            model,
                            object_id,
                            model_db=model_db,
                        ).values_list("revision_id", flat=True)[:keep].iterator())
                # Add to revision query.
                revision_query |= models.Q(
                    pk__in=model_query.order_by().values_list("revision_id", flat=True)
                )
                # If we have at least one model, then we can delete.
                can_delete = True
            if can_delete:
                # Restrict to revisions older than --days, minus the kept set.
                revisions_to_delete = Revision.objects.using(using).filter(
                    revision_query,
                    date_created__lt=timezone.now() - timedelta(days=days),
                ).exclude(
                    pk__in=keep_revision_ids
                ).order_by()
            else:
                revisions_to_delete = Revision.objects.using(using).none()
            # Print out a message, if feeling verbose.
            if verbosity >= 1:
                self.stdout.write("Deleting {total} revisions...".format(
                    total=revisions_to_delete.count(),
                ))
            revisions_to_delete.delete()
|
{
"content_hash": "44045b45b0269f99e6d2ca73132010c7",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 105,
"avg_line_length": 43.744680851063826,
"alnum_prop": 0.5038910505836576,
"repo_name": "etianen/django-reversion",
"id": "cbece29d50a6f258bbb3ca55729972c2090d5e73",
"size": "4112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reversion/management/commands/deleterevisions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6265"
},
{
"name": "Python",
"bytes": "130019"
}
],
"symlink_target": ""
}
|
import jasy.core.FlagSet as FlagSet
import jasy.script.optimize.CryptPrivates as CryptPrivates
import jasy.script.optimize.BlockReducer as BlockReducer
import jasy.script.optimize.LocalVariables as LocalVariables
import jasy.script.optimize.CombineDeclarations as CombineDeclarations
import jasy.script.optimize.ClosureWrapper as ClosureWrapper
class Error(Exception):
    """Error object which is raised whenever an optimization could not be applied correctly."""

    def __init__(self, msg):
        # Keep the message on the instance; __str__ renders it.
        self.__msg = msg

    def __str__(self):
        return "Error during optimization! %s" % self.__msg
class Optimization(FlagSet.FlagSet):
    """
    Configures an optimization object which can be used to compress classes afterwards.

    The optimization set is frozen after initialization which also generates the unique key based on the given
    optimizations.
    """

    def apply(self, tree):
        """
        Applies the configured optimizations to the given node tree.

        Modifies the tree in-place
        to be sure to have a deep copy if you need the original one. It raises an error instance
        whenever any optimization could not be applied to the given tree.
        """

        if self.has("wrap"):
            try:
                ClosureWrapper.optimize(tree)
            # NOTE(review): this branch catches CryptPrivates.Error although it
            # runs ClosureWrapper -- presumably ClosureWrapper reuses that error
            # type; confirm before "fixing".
            except CryptPrivates.Error as err:
                raise Error(err)

        if self.has("declarations"):
            try:
                CombineDeclarations.optimize(tree)
            except CombineDeclarations.Error as err:
                raise Error(err)

        if self.has("blocks"):
            try:
                BlockReducer.optimize(tree)
            except BlockReducer.Error as err:
                raise Error(err)

        if self.has("variables"):
            try:
                LocalVariables.optimize(tree)
            except LocalVariables.Error as err:
                raise Error(err)

        if self.has("privates"):
            try:
                # Private crypting needs the file id for per-file name hashing.
                CryptPrivates.optimize(tree, tree.fileId)
            except CryptPrivates.Error as err:
                raise Error(err)
|
{
"content_hash": "a35d811470f4f6dc9a94c45865d15238",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 110,
"avg_line_length": 30.3,
"alnum_prop": 0.6317774634606318,
"repo_name": "sebastian-software/jasy",
"id": "3b823a14f4e57f5c9eaa746a235280f9fa4f981e",
"size": "2229",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jasy/script/output/Optimization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "913"
},
{
"name": "Python",
"bytes": "1043666"
},
{
"name": "Shell",
"bytes": "1926"
}
],
"symlink_target": ""
}
|
import unittest
import IECore
import Gaffer
import GafferTest
class ProceduralHolderTest(unittest.TestCase):
    """Tests for Gaffer.ProceduralHolder: type identity, computation, dirty
    propagation, and hashing."""

    @staticmethod
    def _loadReadProcedural(node):
        # The same "read" procedural is loaded by several tests; do it in one
        # place instead of repeating the class-specification lookup.
        classSpec = GafferTest.ParameterisedHolderTest.classSpecification("read", "IECORE_PROCEDURAL_PATHS")[:-1]
        node.setProcedural(*classSpec)

    def testType(self):
        """The node reports the expected type name and inheritance chain."""
        n = Gaffer.ProceduralHolder()
        self.assertEqual(n.typeName(), "Gaffer::ProceduralHolder")
        # assertTrue replaces the deprecated unittest.failUnless alias.
        self.assertTrue(n.isInstanceOf(Gaffer.ParameterisedHolderComputeNode.staticTypeId()))
        self.assertTrue(n.isInstanceOf(Gaffer.ComputeNode.staticTypeId()))
        self.assertTrue(n.isInstanceOf(Gaffer.DependencyNode.staticTypeId()))

    def testCompute(self):
        """The output plug yields the held procedural with matching parameters."""
        n = Gaffer.ProceduralHolder()
        self._loadReadProcedural(n)
        p = n["output"].getValue()
        self.assertTrue(isinstance(p, IECore.ReadProcedural))
        self.assertEqual(p.parameters().getValue(), n.getProcedural().parameters().getValue())

    def testAffects(self):
        """Changing a parameter plug affects only the output plug."""
        n = Gaffer.ProceduralHolder()
        self._loadReadProcedural(n)
        a = n.affects(n["parameters"]["motion"]["blur"])
        self.assertEqual(len(a), 1)
        self.assertTrue(a[0].isSame(n["output"]))

    def testHash(self):
        """The output hash changes when a parameter value changes."""
        n = Gaffer.ProceduralHolder()
        self._loadReadProcedural(n)
        h1 = n["output"].hash()
        n["parameters"]["files"]["name"].setValue("something.cob")
        self.assertNotEqual(h1, n["output"].hash())

    def testRunTimeTyped(self):
        """RunTimeTyped registration agrees with the node's type id."""
        n = Gaffer.ProceduralHolder()
        self.assertEqual(n.typeName(), "Gaffer::ProceduralHolder")
        self.assertEqual(IECore.RunTimeTyped.typeNameFromTypeId(n.typeId()), "Gaffer::ProceduralHolder")
        self.assertEqual(IECore.RunTimeTyped.baseTypeId(n.typeId()), Gaffer.ParameterisedHolderComputeNode.staticTypeId())
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "f16b31a5730e274bb0eba297fcadfd5b",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 120,
"avg_line_length": 32.42622950819672,
"alnum_prop": 0.7067745197168858,
"repo_name": "davidsminor/gaffer",
"id": "ce92a45192f34d748ea94dcffef07d502dd48add",
"size": "3857",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python/GafferTest/ProceduralHolderTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9286"
},
{
"name": "C++",
"bytes": "3358250"
},
{
"name": "COBOL",
"bytes": "64449"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Python",
"bytes": "3267354"
},
{
"name": "Shell",
"bytes": "7055"
},
{
"name": "Slash",
"bytes": "35200"
}
],
"symlink_target": ""
}
|
"""Define the Django Silk setup."""
import os
from setuptools import setup
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

# Long description shown on PyPI, taken verbatim from the README.
README = open('README.md', 'rb').read().decode("UTF-8")

# Trove classifiers describing supported Django/Python versions.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Environment :: Web Environment',
    'Framework :: Django',
    'Framework :: Django :: 1.11',
    'Framework :: Django :: 2.0',
    'Framework :: Django :: 2.1',
    'Framework :: Django :: 2.2',
    'Intended Audience :: Developers',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Topic :: Internet :: WWW/HTTP',
    'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]

# Runtime dependencies installed alongside the package.
REQUIREMENTS = [
    'Django>=1.11',
    'Pygments',
    'python-dateutil',
    'requests',
    'sqlparse',
    'Jinja2',
    'autopep8',
    'pytz',
    'gprof2dot>=2017.09.19',
]

setup(
    name='django-silk',
    version='3.0.4',
    packages=['silk'],
    include_package_data=True,
    license='MIT License',
    description='Silky smooth profiling for the Django Framework',
    long_description=README,
    long_description_content_type='text/markdown',
    url='https://github.com/jazzband/django-silk',
    author='Michael Ford',
    author_email='mtford@gmail.com',
    classifiers=CLASSIFIERS,
    install_requires=REQUIREMENTS,
)
|
{
"content_hash": "393e5b15b28bae1fe7566141173c98e1",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 32.5,
"alnum_prop": 0.5789173789173789,
"repo_name": "mtford90/silk",
"id": "ebc938ca4ccff78affdf4c9d601408eac39b24e4",
"size": "1779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23911"
},
{
"name": "HTML",
"bytes": "60729"
},
{
"name": "JavaScript",
"bytes": "85804"
},
{
"name": "Python",
"bytes": "218742"
},
{
"name": "Shell",
"bytes": "292"
}
],
"symlink_target": ""
}
|
"""
Unit tests for Spark ML Python APIs.
"""
import sys
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.tests import ReusedPySparkTestCase as PySparkTestCase
from pyspark.sql import DataFrame
from pyspark.ml.param import Param
from pyspark.ml.pipeline import Transformer, Estimator, Pipeline
class MockDataset(DataFrame):
    # Minimal DataFrame stand-in for pipeline tests. Deliberately does NOT
    # call DataFrame.__init__ (no SparkContext required); it only tracks how
    # many times it has been passed through a transform via `index`.
    def __init__(self):
        self.index = 0
class MockTransformer(Transformer):
    """Transformer double that records the dataset index and the value of its
    ``fake`` param each time :meth:`transform` is called."""

    def __init__(self):
        super(MockTransformer, self).__init__()
        self.fake = Param(self, "fake", "fake", None)
        self.dataset_index = None    # index of the dataset last seen by transform()
        self.fake_param_value = None # value supplied for self.fake, if any

    def transform(self, dataset, params=None):
        """Record the call, bump ``dataset.index`` and return the dataset."""
        # Fix: default to None instead of the shared mutable {} default
        # (classic mutable-default-argument pitfall). Passing an explicit
        # dict behaves exactly as before.
        if params is None:
            params = {}
        self.dataset_index = dataset.index
        if self.fake in params:
            self.fake_param_value = params[self.fake]
        dataset.index += 1
        return dataset
class MockEstimator(Estimator):
    """Estimator double that records the dataset index and the value of its
    ``fake`` param, and hands back a fresh :class:`MockModel` from fit()."""

    def __init__(self):
        super(MockEstimator, self).__init__()
        self.fake = Param(self, "fake", "fake", None)
        self.dataset_index = None    # index of the dataset last seen by fit()
        self.fake_param_value = None # value supplied for self.fake, if any
        self.model = None            # last model produced by fit()

    def fit(self, dataset, params=None):
        """Record the call and return a new MockModel (also kept on self.model)."""
        # Fix: default to None instead of the shared mutable {} default
        # (classic mutable-default-argument pitfall). Passing an explicit
        # dict behaves exactly as before.
        if params is None:
            params = {}
        self.dataset_index = dataset.index
        if self.fake in params:
            self.fake_param_value = params[self.fake]
        model = MockModel()
        self.model = model
        return model
class MockModel(MockTransformer, Transformer):
    # A fitted-model double: behaves exactly like MockTransformer, but is a
    # distinct type so tests can tell models apart from raw transformers.
    def __init__(self):
        super(MockModel, self).__init__()
class PipelineTests(PySparkTestCase):

    def test_pipeline(self):
        """Fit a four-stage pipeline and verify every stage is invoked in
        order: estimators fit then their models transform, transformers
        transform, and the final estimator's model is not run during fit."""
        dataset = MockDataset()

        stage0 = MockEstimator()
        stage1 = MockTransformer()
        stage2 = MockEstimator()
        stage3 = MockTransformer()
        pipeline = Pipeline().setStages([stage0, stage1, stage2, stage3])

        pipeline_model = pipeline.fit(dataset, {stage0.fake: 0, stage1.fake: 1})

        # Stage 0: estimator fitted first, then its model transforms.
        self.assertEqual(0, stage0.dataset_index)
        self.assertEqual(0, stage0.fake_param_value)
        model0 = stage0.model
        self.assertEqual(0, model0.dataset_index)

        # Stage 1: transformer sees the dataset after model0.
        self.assertEqual(1, stage1.dataset_index)
        self.assertEqual(1, stage1.fake_param_value)

        # Stage 2: last estimator is fitted but its model is not run yet.
        self.assertEqual(2, stage2.dataset_index)
        model2 = stage2.model
        self.assertIsNone(model2.dataset_index, "The model produced by the last estimator should "
                                                "not be called during fit.")

        # Transforming runs every stage once more, in order.
        dataset = pipeline_model.transform(dataset)
        self.assertEqual(2, model0.dataset_index)
        self.assertEqual(3, stage1.dataset_index)
        self.assertEqual(4, model2.dataset_index)
        self.assertEqual(5, stage3.dataset_index)
        self.assertEqual(6, dataset.index)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "f8ff79f7adc6a4ae310bd95c9e2f5609",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 98,
"avg_line_length": 31.071428571428573,
"alnum_prop": 0.6400656814449918,
"repo_name": "hengyicai/OnlineAggregationUCAS",
"id": "b627c2b4e930bf5e5a2e36b671f3a2000cd11054",
"size": "3830",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyspark/ml/tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "35774"
},
{
"name": "CSS",
"bytes": "4512"
},
{
"name": "Java",
"bytes": "605689"
},
{
"name": "JavaScript",
"bytes": "21537"
},
{
"name": "Makefile",
"bytes": "6840"
},
{
"name": "Python",
"bytes": "924210"
},
{
"name": "Roff",
"bytes": "5379"
},
{
"name": "SQLPL",
"bytes": "3603"
},
{
"name": "Scala",
"bytes": "6747601"
},
{
"name": "Shell",
"bytes": "141008"
}
],
"symlink_target": ""
}
|
from lxml import etree
from glob import iglob
from pprint import pprint
import tarfile
from common import EXPORTS_PATH
def ted_documents():
    """Yield ``(member_name, xml_bytes)`` for every XML file inside each
    TED export tarball under ``EXPORTS_PATH``.

    Non-XML members are skipped. Each extracted file handle is closed after
    its contents are yielded.
    """
    for archive_path in iglob(EXPORTS_PATH + '/*.tgz'):
        # Fix: the TarFile was never closed, leaking a file handle per
        # archive; the context manager closes it deterministically.
        with tarfile.open(archive_path, 'r:gz') as tf:
            for member in tf.getmembers():
                if not member.name.endswith('.xml'):
                    continue
                fh = tf.extractfile(member)
                try:
                    yield member.name, fh.read()
                finally:
                    fh.close()
class Extractor(object):
    """Index the leaf paths of an lxml element tree and track which of them
    have been consumed, so :meth:`audit` can report anything left over."""

    def __init__(self, el):
        self.el = el
        self.paths = {}       # relative leaf path -> element
        self._ignore = set()  # paths already consumed or explicitly ignored
        self.generate(el)

    def element_name(self, el):
        """Return ``el``'s path relative to the root element ('.' for the root)."""
        if el == self.el:
            return '.'
        return self.element_name(el.getparent()) + '/' + el.tag

    def generate(self, el):
        """Recursively record every leaf element under ``el`` in ``self.paths``."""
        children = el.getchildren()
        if len(children):
            for child in children:
                self.generate(child)
        else:
            name = self.element_name(el)
            if name not in self.paths:
                self.paths[name] = el

    def ignore(self, path):
        """Mark ``path`` as consumed; a trailing ``*`` ignores the whole prefix."""
        if path.endswith('*'):
            prefix = path[:-1]
            for p in self.paths:
                if p.startswith(prefix):
                    self._ignore.add(p)
        else:
            self._ignore.add(path)

    def text(self, path, ignore=True):
        """Return the text of the element at ``path`` (None if absent)."""
        if path is None:
            return
        el = self.el.find(path)
        if el is None:
            return None
        if ignore:
            self.ignore(self.element_name(el))
        return el.text

    def html(self, path, ignore=True):
        """Return the serialized XML of the element at ``path`` (None if absent)."""
        if path is None:
            return
        el = self.el.find(path)
        if el is None:
            return None
        if ignore:
            self.ignore(self.element_name(el))
        return etree.tostring(el)

    def attr(self, path, attr, ignore=True):
        """Return attribute ``attr`` of the element at ``path`` (None if absent)."""
        if path is None:
            return
        el = self.el.find(path)
        if el is None:
            return None
        if ignore:
            self.ignore(self.element_name(el))
        return el.get(attr)

    def audit(self):
        """Pretty-print every recorded leaf that was never consumed and that
        carries text or attributes (i.e. data the parser missed)."""
        for k, v in sorted(self.paths.items()):
            if k in self._ignore:
                continue
            if v.text or len(v.attrib.keys()):
                pprint({
                    'path': k,
                    'text': v.text,
                    'attr': v.attrib
                })
|
{
"content_hash": "c44197ed9697313a9c313f3bab657430",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 63,
"avg_line_length": 26.568421052631578,
"alnum_prop": 0.49247226624405704,
"repo_name": "pudo-attic/ted-xml",
"id": "37859378406dac65aa3332349cfe711de229ef2f",
"size": "2524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forms/parseutil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25701"
}
],
"symlink_target": ""
}
|
from nltk.corpus import gutenberg
from nltk import ConditionalFreqDist
from random import choice
# Build a bigram model: for each word, count which words follow it.
cfd = ConditionalFreqDist()
prev_word = None
for word in gutenberg.words('austen-persuasion.txt'):
    cfd[prev_word][word] += 1
    prev_word = word

# Starting from a seed word, print a 19-word random walk: at each step,
# pick uniformly among the words observed to follow the current one.
word = 'therefore'
for _ in range(1, 20):
    print(word, end=' ')
    word = choice(list(cfd[word].keys()))
|
{
"content_hash": "ac7843eb777b0fecd71b84c15edb4543",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 58,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.6941649899396378,
"repo_name": "desilinguist/acm-crossroads-nltk",
"id": "14ab8f9153c7369a383c8b37bc4e8db40cb84e38",
"size": "521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "task2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "52432"
},
{
"name": "Python",
"bytes": "4561"
},
{
"name": "TeX",
"bytes": "36253"
}
],
"symlink_target": ""
}
|
from tempest import cli
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
class SimpleReadOnlyNovaManageTest(cli.ClientTestBase):
    """
    Read-only smoke tests for the ``nova-manage`` command line client.

    Only commands without side effects are exercised, with and without
    their optional parameters; for now the checks are limited to return
    codes and non-empty output.
    """

    @classmethod
    def setUpClass(cls):
        if not CONF.service_available.nova:
            raise cls.skipException("%s skipped as Nova is not available" % cls.__name__)
        if not CONF.cli.has_manage:
            raise cls.skipException("%s skipped as *-manage commands not available" % cls.__name__)
        super(SimpleReadOnlyNovaManageTest, cls).setUpClass()

    def test_admin_fake_action(self):
        """A bogus subcommand must make nova-manage fail."""
        self.assertRaises(exceptions.CommandFailed,
                          self.nova_manage,
                          'this-does-nova-exist')

    # Flags, in the order listed by 'nova-manage -h'.

    def test_help_flag(self):
        self.nova_manage('', '-h')

    def test_version_flag(self):
        # Bug 1159957: nova-manage --version writes to stderr
        self.assertNotEqual("", self.nova_manage('', '--version', merge_stderr=True))
        self.assertEqual(self.nova_manage('version'),
                         self.nova_manage('', '--version', merge_stderr=True))

    def test_debug_flag(self):
        self.assertNotEqual("", self.nova_manage('flavor list', '--debug'))

    def test_verbose_flag(self):
        self.assertNotEqual("", self.nova_manage('flavor list', '--verbose'))

    # Subcommands.

    def test_version(self):
        self.assertNotEqual("", self.nova_manage('version'))

    def test_flavor_list(self):
        self.assertNotEqual("", self.nova_manage('flavor list'))

    def test_db_archive_deleted_rows(self):
        # Success is simply the command not raising.
        self.nova_manage('db archive_deleted_rows 50')

    def test_db_sync(self):
        # Success is simply the command not raising.
        self.nova_manage('db sync')

    def test_db_version(self):
        self.assertNotEqual("", self.nova_manage('db version'))

    def test_cell_list(self):
        # Success is simply the command not raising.
        self.nova_manage('cell list')

    def test_host_list(self):
        # Success is simply the command not raising.
        self.nova_manage('host list')
|
{
"content_hash": "c04dad862b44617b179c95fdb61c2afa",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 78,
"avg_line_length": 32.63095238095238,
"alnum_prop": 0.6056183874498359,
"repo_name": "Mirantis/tempest",
"id": "dae0cf860593e1abda5ef328d2c7d59ae5c9c433",
"size": "3377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/cli/simple_read_only/test_nova_manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3297127"
},
{
"name": "Shell",
"bytes": "8663"
}
],
"symlink_target": ""
}
|
"""
Interfaces for Hadoop filesystem access via HttpFs/WebHDFS
"""
import errno
import logging
import posixpath
import stat
import threading
import time
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from desktop.lib.rest import http_client, resource
from hadoop.fs import normpath, SEEK_SET, SEEK_CUR, SEEK_END
from hadoop.fs.hadoopfs import Hdfs
from hadoop.fs.exceptions import WebHdfsException
from hadoop.fs.webhdfs_types import WebHdfsStat, WebHdfsContentSummary
from hadoop.conf import UPLOAD_CHUNK_SIZE
from hadoop.hdfs_site import get_nn_sentry_prefixes, get_umask_mode
import hadoop.conf
import desktop.conf
DEFAULT_HDFS_SUPERUSER = desktop.conf.DEFAULT_HDFS_SUPERUSER.get()
# The number of bytes to read if not specified
DEFAULT_READ_SIZE = 1024*1024 # 1MB
LOG = logging.getLogger(__name__)
class WebHdfs(Hdfs):
    """
    WebHdfs implements the filesystem interface via the WebHDFS rest protocol.
    """
    # User that REST calls authenticate as; per-request 'doas' impersonates
    # the effective Hue user (see _getparams).
    DEFAULT_USER = desktop.conf.DEFAULT_USER.get()  # This should be the user running Hue
    # Name of the non-timestamped checkpoint directory inside .Trash.
    TRASH_CURRENT = 'Current'
    def __init__(self, url,
                 fs_defaultfs,
                 logical_name=None,
                 hdfs_superuser=None,
                 security_enabled=False,
                 ssl_cert_ca_verify=True,
                 temp_dir="/tmp",
                 umask=01022):  # NOTE: Python 2 octal literal (== 0o1022)
        """Create a WebHdfs client for the namenode at ``url``.

        ``hdfs_superuser`` may be None, in which case it is lazily determined
        from the owner of '/' (see the ``superuser`` property).
        """
        self._url = url
        self._superuser = hdfs_superuser
        self._security_enabled = security_enabled
        self._ssl_cert_ca_verify = ssl_cert_ca_verify
        self._temp_dir = temp_dir
        self._umask = umask
        self._fs_defaultfs = fs_defaultfs
        self._logical_name = logical_name
        self._client = self._make_client(url, security_enabled, ssl_cert_ca_verify)
        self._root = resource.Resource(self._client)
        # To store user info (the effective user is per-thread; see setuser).
        self._thread_local = threading.local()
        LOG.debug("Initializing Hadoop WebHdfs: %s (security: %s, superuser: %s)" % (self._url, self._security_enabled, self._superuser))
    @classmethod
    def from_config(cls, hdfs_config):
        """Build a WebHdfs instance from a Hue HDFS configuration section."""
        fs_defaultfs = hdfs_config.FS_DEFAULTFS.get()
        return cls(url=_get_service_url(hdfs_config),
                   fs_defaultfs=fs_defaultfs,
                   logical_name=hdfs_config.LOGICAL_NAME.get(),
                   security_enabled=hdfs_config.SECURITY_ENABLED.get(),
                   ssl_cert_ca_verify=hdfs_config.SSL_CERT_CA_VERIFY.get(),
                   temp_dir=hdfs_config.TEMP_DIR.get(),
                   umask=get_umask_mode())

    def __str__(self):
        return "WebHdfs at %s" % self._url

    def _make_client(self, url, security_enabled, ssl_cert_ca_verify=True):
        """Build the HTTP client used for all REST calls against ``url``."""
        client = http_client.HttpClient(url, exc_class=WebHdfsException, logger=LOG)
        if security_enabled:
            client.set_kerberos_auth()
        client.set_verify(ssl_cert_ca_verify)
        return client
    @property
    def uri(self):
        # Base WebHDFS REST URL this client talks to.
        return self._url

    @property
    def logical_name(self):
        return self._logical_name

    @classmethod
    def is_sentry_managed(cls, path):
        """True if ``path`` falls under any configured Sentry-managed prefix."""
        prefixes = get_nn_sentry_prefixes().split(',')
        return any([path == p or path.startswith(p + '/') for p in prefixes if p])

    @property
    def fs_defaultfs(self):
        return self._fs_defaultfs

    @property
    def umask(self):
        return self._umask

    @property
    def security_enabled(self):
        return self._security_enabled

    @property
    def ssl_cert_ca_verify(self):
        return self._ssl_cert_ca_verify

    @property
    def superuser(self):
        # Lazily determined and cached; best-effort fallback on any error.
        if self._superuser is None:
            try:
                # The owner of '/' is usually the superuser
                sb = self.stats('/')
                self._superuser = sb.user
            except Exception, ex:
                LOG.exception('Failed to determine superuser of %s: %s' % (self, ex))
                self._superuser = DEFAULT_HDFS_SUPERUSER
        return self._superuser

    @property
    def user(self):
        # Effective user for this thread; falls back to the Hue default user
        # when setuser() has not been called on this thread yet.
        try:
            return self._thread_local.user
        except AttributeError:
            return WebHdfs.DEFAULT_USER

    @property
    def trash_path(self):
        return self.join(self.get_home_dir(), '.Trash')

    @property
    def current_trash_path(self):
        return self.join(self.trash_path, self.TRASH_CURRENT)

    def _getparams(self):
        # Common query parameters: authenticate as the Hue service user and
        # impersonate ('doas') the effective end user.
        return {
            "user.name" : WebHdfs.DEFAULT_USER,
            "doas" : self.user
        }

    def setuser(self, user):
        """Set a new user. Return the current user."""
        curr = self.user
        self._thread_local.user = user
        return curr
    def listdir_stats(self, path, glob=None):
        """
        listdir_stats(path, glob=None) -> [ WebHdfsStat ]

        Get directory listing with stats.
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        if glob is not None:
            params['filter'] = glob
        params['op'] = 'LISTSTATUS'
        json = self._root.get(path, params)
        filestatus_list = json['FileStatuses']['FileStatus']
        return [ WebHdfsStat(st, path) for st in filestatus_list ]

    def listdir(self, path, glob=None):
        """
        listdir(path, glob=None) -> [ entry names ]

        Get directory entry names without stats.
        """
        dirents = self.listdir_stats(path, glob)
        return [Hdfs.basename(x.path) for x in dirents]

    def get_content_summary(self, path):
        """
        get_content_summary(path) -> WebHdfsContentSummary
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'GETCONTENTSUMMARY'
        json = self._root.get(path, params)
        return WebHdfsContentSummary(json['ContentSummary'])
    def _stats(self, path):
        """This version of stats returns None if the entry is not found"""
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'GETFILESTATUS'
        try:
            json = self._root.get(path, params)
            return WebHdfsStat(json['FileStatus'], path)
        except WebHdfsException, ex:
            # Missing entries come back either as a server-side
            # FileNotFoundException or a plain HTTP 404.
            if ex.server_exc == 'FileNotFoundException' or ex.code == 404:
                return None
            raise ex

    def stats(self, path):
        """
        stats(path) -> WebHdfsStat

        Raises IOError(ENOENT) if the path does not exist.
        """
        res = self._stats(path)
        if res is not None:
            return res
        raise IOError(errno.ENOENT, _("File %s not found") % path)

    def exists(self, path):
        """Return True if the path exists (file or directory)."""
        return self._stats(path) is not None

    def isdir(self, path):
        """Return True if the path exists and is a directory."""
        sb = self._stats(path)
        if sb is None:
            return False
        return sb.isDir

    def isfile(self, path):
        """Return True if the path exists and is a regular file."""
        sb = self._stats(path)
        if sb is None:
            return False
        return not sb.isDir
def _ensure_current_trash_directory(self):
"""Create trash directory for a user if it doesn't exist."""
if self.exists(self.current_trash_path):
self.mkdir(self.current_trash_path)
return self.current_trash_path
    def _trash(self, path, recursive=False):
        """
        _trash(path, recursive=False)

        Move a file or directory to trash.
        Will create a timestamped directory underneath /user/<username>/.Trash.

        Trash must be enabled for this to work.
        """
        if not self.exists(path):
            raise IOError(errno.ENOENT, _("File %s not found") % path)

        if not recursive and self.isdir(path):
            raise IOError(errno.EISDIR, _("File %s is a directory") % path)

        if path.startswith(self.trash_path):
            raise IOError(errno.EPERM, _("File %s is already trashed") % path)

        # Make path (with timestamp suffix if necessary to avoid clobbering
        # an earlier trashed copy of the same path)
        base_trash_path = self.join(self._ensure_current_trash_directory(), path[1:])
        trash_path = base_trash_path
        while self.exists(trash_path):
            trash_path = base_trash_path + str(time.time())

        # Move path to trash path
        self.mkdir(self.dirname(trash_path))
        self.rename(path, trash_path)

    def _delete(self, path, recursive=False):
        """
        _delete(path, recursive=False)

        Delete a file or directory directly (bypassing trash).
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'DELETE'
        params['recursive'] = recursive and 'true' or 'false'
        result = self._root.delete(path, params)

        # This part of the API is nonsense.
        # The lack of exception should indicate success.
        if not result['boolean']:
            raise IOError(_('Delete failed: %s') % path)

    def remove(self, path, skip_trash=False):
        """Delete a file. With skip_trash, bypass trash and delete outright."""
        if skip_trash:
            self._delete(path, recursive=False)
        else:
            self._trash(path, recursive=False)

    def rmdir(self, path, skip_trash=False):
        """Delete a directory."""
        self.remove(path, skip_trash)

    def rmtree(self, path, skip_trash=False):
        """Delete a tree recursively."""
        if skip_trash:
            self._delete(path, recursive=True)
        else:
            self._trash(path, recursive=True)
    def restore(self, path):
        """
        restore(path)

        The root of ``path`` will be /users/<current user>/.Trash/<timestamp>.
        Removing the root from ``path`` will provide the original path.
        Ensure parent directories exist and rename path.
        """
        if not path.startswith(self.trash_path):
            raise IOError(errno.EPERM, _("File %s is not in trash") % path)

        # Build original path by peeling path components until we reach the
        # trash root, then re-joining them in order.
        original_path = []
        split_path = self.split(path)
        while split_path[0] != self.trash_path:
            original_path.append(split_path[1])
            split_path = self.split(split_path[0])
        original_path.reverse()
        original_path = self.join(posixpath.sep, *original_path)

        # move to original path
        # the path could have been expunged.
        if self.exists(original_path):
            raise IOError(errno.EEXIST, _("Path %s already exists.") % str(smart_str(original_path)))
        self.rename(path, original_path)

    def purge_trash(self):
        """
        purge_trash()

        Purge all trash in users ``trash_path``
        """
        for timestamped_directory in self.listdir(self.trash_path):
            self.rmtree(self.join(self.trash_path, timestamped_directory), True)
    def mkdir(self, path, mode=None):
        """
        mkdir(path, mode=None)

        Creates a directory and any parent directory if necessary.
        Defaults to the umask-derived directory permissions when mode is None.
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'MKDIRS'
        if mode is None:
            mode = self.getDefaultDirPerms()
        params['permission'] = safe_octal(mode)
        success = self._root.put(path, params)
        if not success:
            raise IOError(_("Mkdir failed: %s") % path)

    def rename(self, old, new):
        """rename(old, new) -- a relative ``new`` is resolved against old's directory."""
        old = Hdfs.normpath(old)
        if not new.startswith('/'):
            new = Hdfs.join(Hdfs.dirname(old), new)
        new = Hdfs.normpath(new)
        params = self._getparams()
        params['op'] = 'RENAME'
        # Encode `new' because it's in the params
        params['destination'] = smart_str(new)
        result = self._root.put(old, params)
        if not result['boolean']:
            raise IOError(_("Rename failed: %s -> %s") %
                          (str(smart_str(old)), str(smart_str(new))))

    def rename_star(self, old_dir, new_dir):
        """Equivalent to `mv old_dir/* new_dir`; creates new_dir if needed."""
        if not self.isdir(old_dir):
            raise IOError(errno.ENOTDIR, _("'%s' is not a directory") % old_dir)
        if not self.exists(new_dir):
            self.mkdir(new_dir)
        elif not self.isdir(new_dir):
            raise IOError(errno.ENOTDIR, _("'%s' is not a directory") % new_dir)
        ls = self.listdir(old_dir)
        for dirent in ls:
            self.rename(Hdfs.join(old_dir, dirent), Hdfs.join(new_dir, dirent))
    def chown(self, path, user=None, group=None, recursive=False):
        """chown(path, user=None, group=None, recursive=False)"""
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'SETOWNER'
        if user is not None:
            params['owner'] = user
        if group is not None:
            params['group'] = group
        if recursive:
            # One SETOWNER call per descendant path.
            for xpath in self.listdir_recursive(path):
                self._root.put(xpath, params)
        else:
            self._root.put(path, params)

    def chmod(self, path, mode, recursive=False):
        """
        chmod(path, mode, recursive=False)

        `mode' should be an octal integer or string.
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'SETPERMISSION'
        params['permission'] = safe_octal(mode)
        if recursive:
            # One SETPERMISSION call per descendant path.
            for xpath in self.listdir_recursive(path):
                self._root.put(xpath, params)
        else:
            self._root.put(path, params)

    def get_home_dir(self):
        """get_home_dir() -> Home directory for the current user"""
        params = self._getparams()
        params['op'] = 'GETHOMEDIRECTORY'
        res = self._root.get(params=params)
        return res['Path']
    def read(self, path, offset, length, bufsize=None):
        """
        read(path, offset, length[, bufsize]) -> data

        Read data from a file. Returns "" when reading past end-of-file.
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'OPEN'
        params['offset'] = long(offset)
        params['length'] = long(length)
        if bufsize is not None:
            params['bufsize'] = bufsize
        try:
            return self._root.get(path, params)
        except WebHdfsException, ex:
            # Reading past EOF is reported as an error by the server;
            # translate it to an empty read.
            if "out of the range" in ex.message:
                return ""
            raise ex

    def open(self, path, mode='r'):
        """
        DEPRECATED!

        open(path, mode='r') -> File object

        This exists for legacy support and backwards compatibility only.
        Please use read().
        """
        return File(self, path, mode)

    def getDefaultFilePerms(self):
        # rw for everyone, minus the configured umask (Python 2 octals).
        return 0666 & (01777 ^ self.umask)

    def getDefaultDirPerms(self):
        # Full permissions plus sticky bit, minus the configured umask.
        return 01777 & (01777 ^ self.umask)
    def create(self, path, overwrite=False, blocksize=None, replication=None, permission=None, data=None):
        """
        create(path, overwrite=False, blocksize=None, replication=None, permission=None)

        Creates a file with the specified parameters.
        `permission' should be an octal integer or string.
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'CREATE'
        params['overwrite'] = overwrite and 'true' or 'false'
        if blocksize is not None:
            params['blocksize'] = long(blocksize)
        if replication is not None:
            params['replication'] = int(replication)
        if permission is None:
            permission = self.getDefaultFilePerms()
        params['permission'] = safe_octal(permission)

        # CREATE is a two-step protocol: namenode redirects to a datanode.
        self._invoke_with_redirect('PUT', path, params, data)

    def append(self, path, data):
        """
        append(path, data)

        Append data to a given file.
        """
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'APPEND'
        self._invoke_with_redirect('POST', path, params, data)
    # ACL operations. An aclspec is a comma-separated list of ACL entries,
    # e.g. ACLSPEC = user:joe:rwx,user::rw-
    def modify_acl_entries(self, path, aclspec):
        """Add/update the given ACL entries on ``path``."""
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'MODIFYACLENTRIES'
        params['aclspec'] = aclspec
        return self._root.put(path, params)

    def remove_acl_entries(self, path, aclspec):
        """Remove the given ACL entries from ``path``."""
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'REMOVEACLENTRIES'
        params['aclspec'] = aclspec
        return self._root.put(path, params)

    def remove_default_acl(self, path):
        """Remove the default ACL from a directory."""
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'REMOVEDEFAULTACL'
        return self._root.put(path, params)

    def remove_acl(self, path):
        """Remove the entire ACL from ``path``."""
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'REMOVEACL'
        return self._root.put(path, params)

    def set_acl(self, path, aclspec):
        """Replace the whole ACL on ``path`` with ``aclspec``."""
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'SETACL'
        params['aclspec'] = aclspec
        return self._root.put(path, params)

    def get_acl_status(self, path):
        """Return the ACL status of ``path``."""
        path = Hdfs.normpath(path)
        params = self._getparams()
        params['op'] = 'GETACLSTATUS'
        return self._root.get(path, params)
def copyfile(self, src, dst, skip_header=False):
sb = self._stats(src)
if sb is None:
raise IOError(errno.ENOENT, _("Copy src '%s' does not exist") % src)
if sb.isDir:
raise IOError(errno.INVAL, _("Copy src '%s' is a directory") % src)
if self.isdir(dst):
raise IOError(errno.INVAL, _("Copy dst '%s' is a directory") % dst)
offset = 0
while True:
data = self.read(src, offset, UPLOAD_CHUNK_SIZE.get())
cnt = len(data)
if offset == 0:
if skip_header:
n = data.index('\n')
if n > 0:
data = data[n + 1:]
self.create(dst,
overwrite=True,
blocksize=sb.blockSize,
replication=sb.replication,
permission=oct(stat.S_IMODE(sb.mode)),
data=data)
else:
self.append(dst, data)
if cnt < UPLOAD_CHUNK_SIZE.get():
break
offset += cnt
def copy_remote_dir(self, source, destination, dir_mode=None, owner=None):
if owner is None:
owner = self.DEFAULT_USER
if dir_mode is None:
dir_mode = self.getDefaultDirPerms()
self.do_as_user(owner, self.mkdir, destination, mode=dir_mode)
for stat in self.listdir_stats(source):
source_file = stat.path
destination_file = posixpath.join(destination, stat.name)
if stat.isDir:
self.copy_remote_dir(source_file, destination_file, dir_mode, owner)
else:
self.do_as_user(owner, self.copyfile, source_file, destination_file)
    def copy(self, src, dest, recursive=False, dir_mode=None, owner=None):
        """
        Copy file, or directory, in HDFS to another location in HDFS.

        ``src`` -- The directory, or file, to copy from.
        ``dest`` -- the directory, or file, to copy to.
                If 'dest' is a directory that exists, copy 'src' into dest.
                If 'dest' is a file that exists and 'src' is a file, overwrite dest.
                If 'dest' does not exist, create 'src' as 'dest'.
        ``recursive`` -- Recursively copy contents of 'src' to 'dest'.
                         This is required for directories.
        ``dir_mode`` and ``owner`` are used to define permissions on the newly
        copied files and directories.

        This method will overwrite any pre-existing files that collide with what is being copied.
        Copying a directory to a file is not allowed.
        """
        if owner is None:
            owner = self.user

        # Hue was defauling permissions on copying files to the permissions
        # of the original file, but was not doing the same for directories
        # changed below for directories to remain consistent
        if dir_mode is None:
            # NOTE(review): if 'src' does not exist, _stats() returns None and
            # the next line raises AttributeError before the friendlier
            # ENOENT check below — consider reordering. Left as-is here.
            sb = self._stats(src)
            dir_mode=oct(stat.S_IMODE(sb.mode))

        src = self.abspath(src)
        dest = self.abspath(dest)

        if not self.exists(src):
            raise IOError(errno.ENOENT, _("File not found: %s") % src)

        if self.isdir(src):
            # 'src' is directory.
            # Skip if not recursive copy and 'src' is directory.
            if not recursive:
                LOG.debug("Skipping contents of %s" % src)
                return None

            # If 'dest' is a directory change 'dest'
            # to include 'src' basename.
            # create 'dest' if it doesn't already exist.
            if self.exists(dest):
                if self.isdir(dest):
                    dest = self.join(dest, self.basename(src))
                else:
                    raise IOError(errno.EEXIST, _("Destination file %s exists and is not a directory.") % dest)

            self.do_as_user(owner, self.mkdir, dest, mode=dir_mode)

            # Copy files in 'src' directory to 'dest'.
            self.copy_remote_dir(src, dest, dir_mode, owner)
        else:
            # 'src' is a file.
            # If 'dest' is a directory, then copy 'src' into that directory.
            # Other wise, copy to 'dest'.
            if self.exists(dest) and self.isdir(dest):
                self.copyfile(src, self.join(dest, self.basename(src)))
            else:
                self.copyfile(src, dest)
    @staticmethod
    def urlsplit(url):
        # Thin delegation to the shared Hdfs URL splitter.
        return Hdfs.urlsplit(url)

    def get_hdfs_path(self, path):
        """Return the fully-qualified hdfs:// URI for ``path``."""
        return posixpath.join(self.fs_defaultfs, path.lstrip('/'))

    def _invoke_with_redirect(self, method, path, params=None, data=None):
        """
        Issue a request, and expect a redirect, and then submit the data to
        the redirected location. This is used for create, write, etc.

        Returns the response from the redirected request.
        """
        next_url = None
        try:
            # Do not pass data in the first leg.
            self._root.invoke(method, path, params)
        except WebHdfsException, ex:
            # This is expected. We get a 307 redirect.
            # The following call may throw.
            next_url = self._get_redirect_url(ex)

        if next_url is None:
            raise WebHdfsException(_("Failed to create '%s'. HDFS did not return a redirect") % path)

        # Now talk to the real thing. The redirect url already includes the params.
        client = self._make_client(next_url, self.security_enabled, self.ssl_cert_ca_verify)
        headers = {'Content-Type': 'application/octet-stream'}
        return resource.Resource(client).invoke(method, data=data, headers=headers)
    def _get_redirect_url(self, webhdfs_ex):
        """Retrieve the redirect url from an exception object"""
        try:
            # The actual HttpError (307) is wrapped inside
            http_error = webhdfs_ex.get_parent_ex()
            if http_error is None:
                raise webhdfs_ex

            if http_error.response.status_code not in (301, 302, 303, 307):
                LOG.error("Response is not a redirect: %s" % webhdfs_ex)
                raise webhdfs_ex
            return http_error.response.headers['location']
        except Exception, ex:
            LOG.exception("Failed to read redirect from response: %s (%s)" % (webhdfs_ex, ex))
            raise webhdfs_ex

    def get_delegation_token(self, renewer):
        """get_delegation_token(user) -> Delegation token"""
        # Workaround for HDFS-3988
        if self._security_enabled:
            self.get_home_dir()

        params = self._getparams()
        params['op'] = 'GETDELEGATIONTOKEN'
        params['renewer'] = renewer
        res = self._root.get(params=params)
        return res['Token']['urlString']

    def do_as_user(self, username, fn, *args, **kwargs):
        """Run ``fn`` impersonating ``username``, restoring the previous user after."""
        prev_user = self.user
        try:
            self.setuser(username)
            return fn(*args, **kwargs)
        finally:
            self.setuser(prev_user)

    def do_as_superuser(self, fn, *args, **kwargs):
        """Run ``fn`` as the HDFS superuser."""
        return self.do_as_user(self.superuser, fn, *args, **kwargs)

    def do_recursively(self, fn, path, *args, **kwargs):
        """Apply ``fn`` to every entry under ``path`` (depth-first), best-effort.

        Failures on individual entries are deliberately swallowed so one bad
        entry does not abort the whole traversal.
        """
        for stat in self.listdir_stats(path):
            try:
                if stat.isDir:
                    self.do_recursively(fn, stat.path, *args, **kwargs)
                fn(stat.path, *args, **kwargs)
            except Exception:
                pass
class File(object):
    """
    DEPRECATED!

    Represent an open file on HDFS. This exists to mirror the old thriftfs
    interface, for backwards compatibility only.
    """
    def __init__(self, fs, path, mode='r'):
        # All reads/writes go through `fs` at an explicit offset (`_pos`);
        # there is no underlying OS file handle.
        self._fs = fs
        self._path = normpath(path)
        self._pos = 0
        self._mode = mode

        try:
            self._stat = fs.stats(path)
            if self._stat.isDir:
                raise IOError(errno.EISDIR, _("Is a directory: '%s'") % path)
        except IOError, ex:
            # Create-on-open semantics: a missing file in write mode is created.
            if ex.errno == errno.ENOENT and 'w' in self._mode:
                self._fs.create(self._path)
                self.stat()
            else:
                raise ex

    def seek(self, offset, whence=0):
        """Set the file pointer to the given spot. @see file.seek"""
        if whence == SEEK_SET:
            self._pos = offset
        elif whence == SEEK_CUR:
            self._pos += offset
        elif whence == SEEK_END:
            # Refresh the cached stat, then position relative to current size.
            self.stat()
            self._pos = self._fs.stats(self._path).size + offset
        else:
            raise IOError(errno.EINVAL, _("Invalid argument to seek for whence"))

    def stat(self):
        # Refresh and return the cached stat for this path.
        self._stat = self._fs.stats(self._path)
        return self._stat

    def tell(self):
        return self._pos

    def read(self, length=DEFAULT_READ_SIZE):
        # Read `length` bytes from the current offset and advance it by the
        # number of bytes actually returned.
        data = self._fs.read(self._path, self._pos, length)
        self._pos += len(data)
        return data

    def write(self, data):
        """Append the data to the end of the file"""
        self.append(data)

    def append(self, data):
        if 'w' not in self._mode:
            raise IOError(errno.EINVAL, _("File not open for writing"))
        self._fs.append(self._path, data=data)

    def flush(self):
        # No client-side buffering; nothing to flush.
        pass

    def close(self):
        # No handle to release; provided for file-API compatibility.
        pass
def safe_octal(octal_value):
    """Return ``octal_value`` rendered as an octal string.

    Numeric inputs go through ``oct()``; values that ``oct()`` rejects
    (e.g. strings already containing the octal digits) are passed through
    ``str()`` unchanged.
    """
    try:
        rendered = oct(octal_value)
    except TypeError:
        rendered = str(octal_value)
    return rendered
def _get_service_url(hdfs_config):
    """Resolve the WebHDFS endpoint for a cluster config.

    An explicit WEBHDFS_URL override wins; otherwise the host is derived
    from fs.defaultFS and combined with the default NameNode HTTP port.
    """
    override = hdfs_config.WEBHDFS_URL.get()
    if override:
        return override

    netloc = Hdfs.urlsplit(hdfs_config.FS_DEFAULTFS.get())[1]
    host = netloc.split(':')[0]
    return "http://%s:%s/webhdfs/v1" % (host, hadoop.conf.DEFAULT_NN_HTTP_PORT)
def test_fs_configuration(fs_config):
"""
This is a config validation method. Returns a list of
[ (config_variable, error_message) ]
"""
fs = WebHdfs.from_config(fs_config)
fs.setuser(fs.superuser)
# Access root
try:
statbuf = fs.stats('/')
if statbuf.user != DEFAULT_HDFS_SUPERUSER:
return [(fs_config.WEBHDFS_URL, _("Filesystem root '/' should be owned by 'hdfs'"))]
except Exception, ex:
LOG.info("%s -- Validation error: %s" % (fs, ex))
return [(fs_config.WEBHDFS_URL, _('Failed to access filesystem root'))]
# Write a file
tmpname = fs.mktemp(prefix='hue_config_validation')
try:
fs.create(tmpname)
except Exception, ex:
LOG.info("%s -- Validation error: %s" % (fs, ex))
return [(fs_config.WEBHDFS_URL,
_('Failed to create temporary file "%s"') % tmpname)]
# Check superuser has super power
try:
try:
fs.chown(tmpname, fs.superuser)
except Exception, ex:
LOG.info("%s -- Validation error: %s" % (fs, ex))
return [(fs_config.WEBHDFS_URL,
'Failed to chown file. Please make sure that the filesystem root '
'is owned by the cluster superuser ("hdfs" in most cases).')]
finally:
try:
fs.remove(tmpname)
except Exception, ex:
LOG.error("Failed to remove '%s': %s" % (tmpname, ex))
return [(fs_config.WEBHDFS_URL,
_('Failed to remove temporary file "%s"') % tmpname)]
return [ ]
|
{
"content_hash": "1276176e2c07dd5a5b9ca135cdfad418",
"timestamp": "",
"source": "github",
"line_count": 875,
"max_line_length": 133,
"avg_line_length": 29.461714285714287,
"alnum_prop": 0.6320260677295473,
"repo_name": "nvoron23/hue",
"id": "412d85eea5a3a06545ac166ed4858571ec2ee504",
"size": "26571",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/libs/hadoop/src/hadoop/fs/webhdfs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1712779"
},
{
"name": "C++",
"bytes": "178518"
},
{
"name": "CSS",
"bytes": "415919"
},
{
"name": "Emacs Lisp",
"bytes": "12145"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Groff",
"bytes": "14877"
},
{
"name": "HTML",
"bytes": "21211447"
},
{
"name": "Java",
"bytes": "3080564"
},
{
"name": "JavaScript",
"bytes": "2547814"
},
{
"name": "Makefile",
"bytes": "87389"
},
{
"name": "Mako",
"bytes": "2041625"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "Perl",
"bytes": "161801"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Prolog",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "31227804"
},
{
"name": "Scala",
"bytes": "75705"
},
{
"name": "Shell",
"bytes": "41224"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "TeX",
"bytes": "129526"
},
{
"name": "Thrift",
"bytes": "100994"
},
{
"name": "XSLT",
"bytes": "342237"
}
],
"symlink_target": ""
}
|
"""
Implements a global range check used to filter out very obvious errors.
"""
from util import obs_utils
def test(p, parameters):
"""
Runs the quality control check on profile p and returns a numpy array
of quality control decisions with False where the data value has
passed the check and True where it failed.
"""
# Get temperature and pressure values from the profile.
t = p.t()
# Make the quality control decisions. This should
# return true if the temperature is outside -4 deg C
# and 100 deg C.
qc = (t.mask == False) & ((t.data < -4.0) | (t.data > 100.0))
return qc
|
{
"content_hash": "cb48bd910d3aafd1ed65cefe0d8a33bb",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 74,
"avg_line_length": 26.666666666666668,
"alnum_prop": 0.653125,
"repo_name": "IQuOD/AutoQC",
"id": "f0a21149dede7c530abd7111804291f5280dce97",
"size": "640",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qctests/IQuOD_gross_range_check.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "868390"
},
{
"name": "Shell",
"bytes": "2581"
}
],
"symlink_target": ""
}
|
from gm_pr.models import Project, Repo
def proj_repo(request):
    """Retrieve project list from a Slack request or web request.

    The project name comes from GET and can be passed either as
    'channel_name' or 'project' (they both give the same result).

    Returns a tuple: (project name or None, queryset of Repos or None).
    """
    repos = None
    project = None
    # BUG FIX: the original condition `a and b or c` evaluated
    # `'project' in request.GET` even when request.GET was None (because
    # `and` binds tighter than `or`), raising TypeError. Parenthesize the
    # key checks explicitly and use identity comparison for None.
    if request.GET is not None and \
       ('channel_name' in request.GET or 'project' in request.GET):
        if 'channel_name' in request.GET:
            project = request.GET['channel_name']
        else:
            project = request.GET['project']
        repos = Repo.objects.filter(projects__name=project)
    return project, repos
|
{
"content_hash": "29a8d335f036a33c76c2b58a85a10ecd",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 33.75,
"alnum_prop": 0.6459259259259259,
"repo_name": "Genymobile/gm_pr",
"id": "895669c32c67879d66bc02f7ea752e9fcfb38ed7",
"size": "1252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gm_pr/proj_repo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1450"
},
{
"name": "Dockerfile",
"bytes": "1413"
},
{
"name": "HTML",
"bytes": "8891"
},
{
"name": "Python",
"bytes": "61706"
}
],
"symlink_target": ""
}
|
import frappe
import unittest
from frappe.test_runner import make_test_objects
# Fixture records for the 'Email Domain' doctype (from test_records.json).
test_records = frappe.get_test_records('Email Domain')
class TestDomain(unittest.TestCase):
    """Saving an Email Domain must push its settings to linked Email Accounts."""

    def setUp(self):
        make_test_objects('Email Domain', reset=True)

    def tearDown(self):
        frappe.delete_doc("Email Account", "Test")
        frappe.delete_doc("Email Domain", "test.com")

    def test_on_update(self):
        domain = frappe.get_doc("Email Domain", "test.com")
        account = frappe.get_doc("Email Account", "Test")

        # Force the account's incoming port out of sync with its domain.
        account.incoming_port = int(domain.incoming_port) + 5
        account.save()

        # Trigger propagation of domain settings to accounts using it.
        domain.on_update()
        account.reload()

        # Every domain-controlled attribute must now match the domain again.
        synced_attrs = (
            'incoming_port', 'use_imap', 'use_ssl', 'use_tls',
            'attachment_limit', 'smtp_server', 'smtp_port',
        )
        for attr in synced_attrs:
            self.assertEqual(getattr(account, attr), getattr(domain, attr))
|
{
"content_hash": "7d417084854398c692bc141c70058c58",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 79,
"avg_line_length": 36.47222222222222,
"alnum_prop": 0.7555217060167555,
"repo_name": "mhbu50/frappe",
"id": "7522dd5282ebdef7b388a309a1dcfa4b8b2c5a60",
"size": "1434",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/email/doctype/email_domain/test_email_domain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "67734"
},
{
"name": "HTML",
"bytes": "247122"
},
{
"name": "JavaScript",
"bytes": "2359670"
},
{
"name": "Less",
"bytes": "25489"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3464477"
},
{
"name": "SCSS",
"bytes": "248877"
},
{
"name": "Shell",
"bytes": "3505"
},
{
"name": "Vue",
"bytes": "96912"
}
],
"symlink_target": ""
}
|
import bsp as openmoto_bsp # bsp should point to the bsp file for your hw
import threading
import time
# Thread class for PWM Turn signals
class turnThread (threading.Thread):
def __init__(self, threadID, name, output, event):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.out = output
self.event = event
def run(self):
print "Starting " + self.name + '\n'
value = openmoto_bsp.turnsignal_min
while 1:
if not self.event.isSet():
step = openmoto_bsp.turnsignal_step
value = openmoto_bsp.turnsignal_min
self.event.wait()
self.out(value)
time.sleep(openmoto_bsp.turnsignal_delay)
value += step
if value >= openmoto_bsp.turnsignal_max or value <= openmoto_bsp.turnsignal_min:
step = -step
time.sleep(3*openmoto_bsp.turnsignal_delay)
print "Exiting " + self.name + '\n'
# Thread class for PWM Brake signal
class brakeThread (threading.Thread):
    # PWM brake-light thread: while `event` is set it pulses — a brief pop to
    # max brightness, then a longer hold just below max; when cleared it dims
    # to minimum and blocks until the brake is applied again.
    def __init__(self, threadID, name, output, event):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.out = output      # callable driving the PWM output
        self.event = event     # threading.Event: set while the brake is applied

    def run(self):
        print "Starting " + self.name + '\n'
        self.out(openmoto_bsp.brakelight_min)
        while 1:
            if not self.event.isSet():
                # Brake released: dim to minimum and block until re-applied.
                self.out(openmoto_bsp.brakelight_min)
                self.event.wait()
            # Pop briefly to MAX
            self.out(openmoto_bsp.brakelight_max)
            time.sleep(openmoto_bsp.brakelight_delay)
            # Drop back to high
            self.out(openmoto_bsp.brakelight_max-openmoto_bsp.brakelight_step)
            time.sleep(openmoto_bsp.brakelight_delay*10)
        print "Exiting " + self.name + '\n'
|
{
"content_hash": "eeb5297a22e87999a015fe58b6264f3d",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 92,
"avg_line_length": 37.509803921568626,
"alnum_prop": 0.5812859383167799,
"repo_name": "hakmo/openmoto",
"id": "5a33c23a9a17fba06f0d76791720a958391fc186",
"size": "1984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/openmoto_signals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7644"
}
],
"symlink_target": ""
}
|
import string

# Two candidate titles; the second assignment overrides the first, so only
# bea's text is actually processed below -- NOTE(review): confirm intentional.
s = "The Lion, the Witch and the Ward-Robe" # clint's text
s = "The Lion, the Witch, and the Ward-robe" # bea's text

# Strip all ASCII punctuation and lowercase the title for comparison.
exclude = set(string.punctuation)
s = ''.join(ch for ch in s if ch not in exclude).lower()
print s
|
{
"content_hash": "7c587819c1217f30c004ae31a3017ee4",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 58,
"avg_line_length": 25.88888888888889,
"alnum_prop": 0.6824034334763949,
"repo_name": "FantasyFactionTools/contest-ebooks",
"id": "af1242a8cc59e32646c9efd1a8837d7ea5582573",
"size": "233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "title-test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4480"
},
{
"name": "HTML",
"bytes": "8819083"
},
{
"name": "Python",
"bytes": "23908"
},
{
"name": "Shell",
"bytes": "5004"
}
],
"symlink_target": ""
}
|
import os
from os.path import sep, join, exists
from itertools import groupby
from xml.etree.ElementTree import Element, tostring
import re
import json
from tools.arm_pack_manager import Cache
from tools.targets import TARGET_MAP
from tools.export.exporters import Exporter, TargetNotSupportedException
from tools.utils import split_path
class fileCMSIS():
    """CMSIS file class.

    Maps a project file's location onto the category name required by the
    cpdsc project-file format, based on its extension.
    """
    file_types = {'.cpp': 'sourceCpp', '.c': 'sourceC', '.s': 'sourceAsm',
                  '.obj': 'object', '.o': 'object', '.lib': 'library',
                  '.ar': 'linkerScript', '.h': 'header', '.sct': 'linkerScript'}

    def __init__(self, loc, name):
        # Extension lookup is case-insensitive; unknown extensions raise
        # KeyError, as before.
        extension = os.path.splitext(loc)[1].lower()
        self.type = self.file_types[extension]
        self.loc = loc
        self.name = name
class DeviceCMSIS():
    """CMSIS Device class

    Encapsulates target information retrieved by arm-pack-manager"""

    CACHE = Cache(True, False)

    def __init__(self, target):
        target_info = self.check_supported(target)
        if not target_info:
            raise TargetNotSupportedException("Target not supported in CMSIS pack")
        self.url = target_info['pdsc_file']
        self.pdsc_url, self.pdsc_id, _ = split_path(self.url)
        self.pack_url, self.pack_id, _ = split_path(target_info['pack_file'])
        self.dname = target_info["_cpu_name"]
        self.core = target_info["_core"]
        self.dfpu = target_info['processor']['fpu']
        self.debug, self.dvendor = self.vendor_debug(target_info['vendor'])
        self.dendian = target_info['processor'].get('endianness', 'Little-endian')
        self.debug_svd = target_info.get('debug', '')
        self.compile_header = target_info['compile']['header']
        self.target_info = target_info

    @staticmethod
    def check_supported(target):
        """Return the pack-index entry for ``target``, or False if unsupported."""
        t = TARGET_MAP[target]
        try:
            cpu_name = t.device_name
            target_info = DeviceCMSIS.CACHE.index[cpu_name]
        # Target does not have device name or pdsc file.
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; same for the fallback below.
        except Exception:
            try:
                # Try to find the core as a generic CMSIS target
                cpu_name = DeviceCMSIS.cpu_cmsis(t.core)
                target_info = DeviceCMSIS.CACHE.index[cpu_name]
            except Exception:
                return False
        target_info["_cpu_name"] = cpu_name
        target_info["_core"] = t.core
        return target_info

    def vendor_debug(self, vendor):
        """Reads the vendor from a PDSC <dvendor> tag.
        This tag contains some additional numeric information that is meaningless
        for our purposes, so we use a regex to filter.

        Positional arguments:
        Vendor - information in <dvendor> tag scraped from ArmPackManager

        Returns a tuple of (debugger, vendor)
        """
        # FIX: raw string avoids invalid-escape warnings for \w, \s, \d.
        reg = r"([\w\s]+):?\d*?"
        m = re.search(reg, vendor)
        vendor_match = m.group(1) if m else None
        debug_map = {
            'STMicroelectronics': 'ST-Link',
            'Silicon Labs': 'J-LINK',
            'Nuvoton': 'NULink'
        }
        return debug_map.get(vendor_match, "CMSIS-DAP"), vendor_match

    @staticmethod
    def cpu_cmsis(cpu):
        """
        Transforms information from targets.json to the way the generic cores
        are named in CMSIS PDSC files.

        Ex:
        Cortex-M4F => ARMCM4_FP, Cortex-M0+ => ARMCM0P

        Returns formatted CPU
        """
        cpu = cpu.replace("Cortex-", "ARMC")
        cpu = cpu.replace("+", "P")
        cpu = cpu.replace("F", "_FP")
        cpu = cpu.replace("-NS", "")
        return cpu
class CMSIS(Exporter):
    # Exporter that emits a CMSIS .cpdsc project description for ARM targets.
    NAME = 'cmsis'
    TOOLCHAIN = 'ARM'

    @classmethod
    def is_target_supported(cls, target_name):
        # Exportable iff the target supports the ARM toolchain.
        target = TARGET_MAP[target_name]
        return cls.TOOLCHAIN in target.supported_toolchains

    def make_key(self, src):
        """turn a source file into its group name"""
        key = src.name.split(sep)[0]
        if key == ".":
            # Files at the project root are grouped under the export dir name.
            key = os.path.basename(os.path.realpath(self.export_dir))
        return key

    def group_project_files(self, sources, root_element):
        """Recursively group the source files by their encompassing directory"""
        # groupby requires its input sorted by the same key.
        data = sorted(sources, key=self.make_key)
        for group, files in groupby(data, self.make_key):
            new_srcs = []
            for f in list(files):
                spl = f.name.split(sep)
                if len(spl) <= 2:
                    # Direct child of this group: emit a <file> element here.
                    file_element = Element('file',
                                           attrib={
                                               'category': f.type,
                                               'name': f.loc})
                    root_element.append(file_element)
                else:
                    # Deeper path: strip the leading directory and recurse
                    # into a nested <group> element.
                    f.name = os.path.join(*spl[1:])
                    new_srcs.append(f)
            if new_srcs:
                group_element = Element('group', attrib={'name': group})
                root_element.append(self.group_project_files(new_srcs,
                                                             group_element))
        return root_element

    def generate(self):
        # Collect every project file (sources, headers, objects, libraries,
        # linker script) and render the cpdsc template.
        srcs = self.resources.headers + self.resources.s_sources + \
               self.resources.c_sources + self.resources.cpp_sources + \
               self.resources.objects + self.libraries + \
               [self.resources.linker_script]
        srcs = [fileCMSIS(src, src) for src in srcs if src]
        ctx = {
            'name': self.project_name,
            'project_files': tostring(self.group_project_files(srcs, Element('files'))),
            'device': DeviceCMSIS(self.target),
            'date': ''
        }
        self.gen_file('cmsis/cpdsc.tmpl', ctx, 'project.cpdsc')

    @staticmethod
    def clean(_):
        # Remove the generated project description file.
        os.remove('project.cpdsc')
|
{
"content_hash": "c92e8ddf39cc6cb9d680c199f51d77f8",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 88,
"avg_line_length": 36.4472049689441,
"alnum_prop": 0.5625426039536469,
"repo_name": "betzw/mbed-os",
"id": "e0403aedf27c5f0632a8a75aa56349b545da8ef5",
"size": "5868",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/export/cmsis/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "10950"
},
{
"name": "Assembly",
"bytes": "7623507"
},
{
"name": "Batchfile",
"bytes": "22"
},
{
"name": "C",
"bytes": "374441509"
},
{
"name": "C++",
"bytes": "14180257"
},
{
"name": "CMake",
"bytes": "22983"
},
{
"name": "HTML",
"bytes": "1421788"
},
{
"name": "Makefile",
"bytes": "119198"
},
{
"name": "Objective-C",
"bytes": "74923"
},
{
"name": "Perl",
"bytes": "2589"
},
{
"name": "Python",
"bytes": "1347030"
},
{
"name": "Shell",
"bytes": "88415"
},
{
"name": "XSLT",
"bytes": "8394"
}
],
"symlink_target": ""
}
|
import re
# Separates a "tag:" prefix at the beginning of the subject from the rest of
# it: group 1 is the tag (no colon/whitespace), group 2 is the remainder.
re_subject_tag = re.compile('([^:\s]*):\s*(.*)')
class Commit:
    """Holds information about a single commit/patch in the series.

    Args:
        hash: Commit hash (as a string)

    Variables:
        hash: Commit hash
        subject: Subject line
        tags: List of maintainer tag strings
        changes: Dict containing a list of changes (single line strings).
            The dict is indexed by change version (an integer)
        cc_list: List of people to aliases/emails to cc on this commit
        notes: List of lines in the commit (not series) notes
    """
    def __init__(self, hash):
        self.hash = hash
        self.subject = None
        self.tags = []
        self.changes = {}
        self.cc_list = []
        self.notes = []

    def AddChange(self, version, info):
        """Add a new change line to the change list for a version.

        Args:
            version: Patch set version (integer: 1, 2, 3)
            info: Description of change in this version
        """
        # setdefault replaces the get/assign dance; same behavior.
        self.changes.setdefault(version, []).append(info)

    def CheckTags(self):
        """Create a list of subject tags in the commit.

        Subject tags look like this:

            propounder: fort: Change the widget to propound correctly

        Here the tags are propounder and fort. Multiple tags are supported.
        The list is updated in self.tags (docstring previously said
        ``self.tag``, which does not exist).

        Returns:
            None if ok, else the name of a tag with no email alias
        """
        # FIX: use a dedicated name instead of shadowing the builtin `str`.
        remainder = self.subject
        m = True
        while m:
            m = re_subject_tag.match(remainder)
            if m:
                self.tags.append(m.group(1))
                remainder = m.group(2)
        return None

    def AddCc(self, cc_list):
        """Add a list of people to Cc when we send this patch.

        Args:
            cc_list: List of aliases or email addresses
        """
        self.cc_list += cc_list
|
{
"content_hash": "18b895a7682a152e6cc7c7c0632501f1",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 75,
"avg_line_length": 30.10144927536232,
"alnum_prop": 0.5671641791044776,
"repo_name": "EleVenPerfect/S3C2440",
"id": "89cce7f88a297dd32e486cd3cd1a7a0ef7819332",
"size": "2164",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "bootloader/u-boot-2014.04 for tq2440/tools/patman/commit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "107623"
},
{
"name": "Awk",
"bytes": "145"
},
{
"name": "C",
"bytes": "53471003"
},
{
"name": "C++",
"bytes": "4794882"
},
{
"name": "CSS",
"bytes": "7584"
},
{
"name": "GDB",
"bytes": "3642"
},
{
"name": "Makefile",
"bytes": "507759"
},
{
"name": "Objective-C",
"bytes": "33048"
},
{
"name": "PHP",
"bytes": "108169"
},
{
"name": "Perl",
"bytes": "213214"
},
{
"name": "Python",
"bytes": "223908"
},
{
"name": "Roff",
"bytes": "197018"
},
{
"name": "Shell",
"bytes": "86972"
},
{
"name": "Tcl",
"bytes": "967"
},
{
"name": "XSLT",
"bytes": "445"
}
],
"symlink_target": ""
}
|
"""
Install any components that fall under 'galaxy_tools' directive in main.yaml
"""
from cloudbio.galaxy.tools import _install_tools
from cloudbio.custom.galaxy import _prep_galaxy
def install_cbl_galaxy_tools(env):
    """Prepare the Galaxy instance, then install all configured Galaxy tools.

    Order matters: _prep_galaxy must run before _install_tools.
    """
    _prep_galaxy(env)
    _install_tools(env)
|
{
"content_hash": "f4bdf497105776e1dab5789841553fbc",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 76,
"avg_line_length": 26.5,
"alnum_prop": 0.7509433962264151,
"repo_name": "chapmanb/cloudbiolinux",
"id": "5376cff9a0f1ee2033471cd430e4062ec1a648f8",
"size": "265",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "cloudbio/custom/galaxy_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3878"
},
{
"name": "Python",
"bytes": "604413"
},
{
"name": "Ruby",
"bytes": "15629"
},
{
"name": "Shell",
"bytes": "29623"
}
],
"symlink_target": ""
}
|
import unittest
from decimal import Decimal
from posixpath import join as pjoin
import numpy as np
import pandas as pd
import pytest
import ibis
import ibis.backends.impala as api # noqa: E402
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.rules as rules
import ibis.expr.types as ir
import ibis.util as util
from ibis.common.exceptions import IbisTypeError
from ibis.tests.expr.mocks import MockBackend
from .. import ddl # noqa: E402
# Mark every test in this module with the 'udf' marker for selection/skipping.
pytestmark = pytest.mark.udf
class TestWrapping(unittest.TestCase):
    """Checks that wrapped UDFs/UDAs produce correctly-typed ibis expressions
    and compile to the expected Impala SQL (no live cluster needed)."""

    def setUp(self):
        # One column handle per Impala primitive type, plus a decimal column.
        self.con = MockBackend()
        self.table = self.con.table('functional_alltypes')

        self.i8 = self.table.tinyint_col
        self.i16 = self.table.smallint_col
        self.i32 = self.table.int_col
        self.i64 = self.table.bigint_col
        self.d = self.table.double_col
        self.f = self.table.float_col
        self.s = self.table.string_col
        self.b = self.table.bool_col
        self.t = self.table.timestamp_col
        self.dec = self.con.table('tpch_customer').c_acctbal
        # Ordered roughly by implicit-cast precedence; slices of this list
        # drive test_udf_invalid_typecasting below.
        self.all_cols = [
            self.i8,
            self.i16,
            self.i32,
            self.i64,
            self.d,
            self.f,
            self.dec,
            self.s,
            self.b,
            self.t,
        ]

    def test_sql_generation(self):
        func = api.scalar_function(['string'], 'string', name='Tester')
        func.register('identity', 'udf_testing')

        result = func('hello world')
        assert (
            ibis.impala.compile(result)
            == "SELECT udf_testing.identity('hello world') AS `tmp`"
        )

    def test_sql_generation_from_infoclass(self):
        func = api.wrap_udf('test.so', ['string'], 'string', 'info_test')
        # repr() exercised only to ensure it does not raise.
        repr(func)
        func.register('info_test', 'udf_testing')

        result = func('hello world')
        assert (
            ibis.impala.compile(result)
            == "SELECT udf_testing.info_test('hello world') AS `tmp`"
        )

    def test_udf_primitive_output_types(self):
        # (type name, scalar sample, column sample) for each primitive type.
        types = [
            ('boolean', True, self.b),
            ('int8', 1, self.i8),
            ('int16', 1, self.i16),
            ('int32', 1, self.i32),
            ('int64', 1, self.i64),
            ('float', 1.0, self.f),
            ('double', 1.0, self.d),
            ('string', '1', self.s),
            ('timestamp', ibis.timestamp('1961-04-10'), self.t),
        ]
        for t, sv, av in types:
            func = self._register_udf([t], t, 'test')

            ibis_type = dt.validate_type(t)

            # Scalar argument -> scalar expression of the declared type.
            expr = func(sv)
            assert type(expr) == type(  # noqa: E501, E721
                ibis_type.scalar_type()(expr.op())
            )
            # Column argument -> column expression of the declared type.
            expr = func(av)
            assert type(expr) == type(  # noqa: E501, E721
                ibis_type.column_type()(expr.op())
            )

    def test_uda_primitive_output_types(self):
        types = [
            ('boolean', True, self.b),
            ('int8', 1, self.i8),
            ('int16', 1, self.i16),
            ('int32', 1, self.i32),
            ('int64', 1, self.i64),
            ('float', 1.0, self.f),
            ('double', 1.0, self.d),
            ('string', '1', self.s),
            ('timestamp', ibis.timestamp('1961-04-10'), self.t),
        ]
        for t, sv, av in types:
            func = self._register_uda([t], t, 'test')

            ibis_type = dt.validate_type(t)

            # Aggregates always reduce to scalar expressions.
            expr1 = func(sv)
            expr2 = func(sv)
            expected_type1 = type(ibis_type.scalar_type()(expr1.op()))
            expected_type2 = type(ibis_type.scalar_type()(expr2.op()))
            assert isinstance(expr1, expected_type1)
            assert isinstance(expr2, expected_type2)

    def test_decimal(self):
        func = self._register_udf(['decimal(9,0)'], 'decimal(9,0)', 'test')
        expr = func(1.0)
        assert type(expr) == ir.DecimalScalar
        expr = func(self.dec)
        assert type(expr) == ir.DecimalColumn

    def test_udf_invalid_typecasting(self):
        # For each declared input type: the columns that implicitly cast to
        # it (valid) and the columns that must be rejected (invalid).
        cases = [
            ('int8', self.all_cols[:1], self.all_cols[1:]),
            ('int16', self.all_cols[:2], self.all_cols[2:]),
            ('int32', self.all_cols[:3], self.all_cols[3:]),
            ('int64', self.all_cols[:4], self.all_cols[4:]),
            ('boolean', [], self.all_cols[:8] + self.all_cols[9:]),
            # allowing double here for now
            ('float', self.all_cols[:6], [self.s, self.b, self.t]),
            ('double', self.all_cols[:6], [self.s, self.b, self.t]),
            ('string', [], self.all_cols[:7] + self.all_cols[8:]),
            ('timestamp', [], self.all_cols[:-1]),
            ('decimal', self.all_cols[:7], self.all_cols[7:]),
        ]

        for t, valid_casts, invalid_casts in cases:
            func = self._register_udf([t], 'int32', 'typecast')

            for expr in valid_casts:
                func(expr)

            for expr in invalid_casts:
                self.assertRaises(IbisTypeError, func, expr)

    def test_mult_args(self):
        func = self._register_udf(
            ['int32', 'double', 'string', 'boolean', 'timestamp'],
            'int64',
            'mult_types',
        )

        # Any column argument makes the result a column expression...
        expr = func(self.i32, self.d, self.s, self.b, self.t)
        assert issubclass(type(expr), ir.ColumnExpr)

        # ...while all-scalar arguments yield a scalar expression.
        expr = func(1, 1.0, 'a', True, ibis.timestamp('1961-04-10'))
        assert issubclass(type(expr), ir.ScalarExpr)

    def _register_udf(self, inputs, output, name):
        # Helper: build and register a scalar function in ibis_testing.
        func = api.scalar_function(inputs, output, name=name)
        func.register(name, 'ibis_testing')
        return func

    def _register_uda(self, inputs, output, name):
        # Helper: build and register an aggregate function in ibis_testing.
        func = api.aggregate_function(inputs, output, name=name)
        func.register(name, 'ibis_testing')
        return func
@pytest.fixture
def udfcon(con):
    # Enable codegen for the duration of a UDF test, restoring it afterwards.
    con.disable_codegen(False)
    try:
        yield con
    finally:
        con.disable_codegen(True)
@pytest.fixture
def alltypes(udfcon):
    # The standard functional_alltypes test table.
    return udfcon.table('functional_alltypes')
@pytest.fixture
def udf_ll(udfcon, test_data_dir):
    # Path to the LLVM IR sample UDF shipped with the test data.
    return pjoin(test_data_dir, 'udf/udf-sample.ll')
@pytest.fixture
def uda_ll(udfcon, test_data_dir):
    # Path to the LLVM IR sample UDA shipped with the test data.
    return pjoin(test_data_dir, 'udf/uda-sample.ll')
@pytest.fixture
def uda_so(udfcon, test_data_dir):
    # Path to the compiled sample UDA shared object.
    return pjoin(test_data_dir, 'udf/libudasample.so')
@pytest.mark.parametrize(
    ('typ', 'lit_val', 'col_name'),
    [
        ('boolean', True, 'bool_col'),
        ('int8', ibis.literal(5), 'tinyint_col'),
        ('int16', ibis.literal(2 ** 10), 'smallint_col'),
        ('int32', ibis.literal(2 ** 17), 'int_col'),
        ('int64', ibis.literal(2 ** 33), 'bigint_col'),
        ('float', ibis.literal(3.14), 'float_col'),
        ('double', ibis.literal(3.14), 'double_col'),
        ('string', ibis.literal('ibis'), 'string_col'),
        ('timestamp', ibis.timestamp('1961-04-10'), 'timestamp_col'),
    ],
)
@pytest.mark.xfail(
    reason='Unknown reason. xfailing to restore the CI for udf tests. #2358'
)
def test_identity_primitive_types(
    udfcon, alltypes, test_data_db, udf_ll, typ, lit_val, col_name
):
    # Round-trip each primitive type through the sample Identity UDF.
    col_val = alltypes[col_name]
    identity_func_testing(udf_ll, udfcon, test_data_db, typ, lit_val, col_val)
@pytest.mark.xfail(
    reason='Unknown reason. xfailing to restore the CI for udf tests. #2358'
)
def test_decimal(udfcon, test_data_db, udf_ll):
    # Identity UDF over decimal(12,2): literal in -> scalar out (== 1);
    # column in -> column out.
    col = udfcon.table('tpch_customer').c_acctbal
    literal = ibis.literal(1).cast('decimal(12,2)')
    name = '__tmp_udf_' + util.guid()

    func = udf_creation_to_op(
        udf_ll,
        udfcon,
        test_data_db,
        name,
        'Identity',
        ['decimal(12,2)'],
        'decimal(12,2)',
    )

    expr = func(literal)
    assert issubclass(type(expr), ir.ScalarExpr)
    result = udfcon.execute(expr)
    assert result == Decimal(1)

    expr = func(col)
    assert issubclass(type(expr), ir.ColumnExpr)
    udfcon.execute(expr)
@pytest.mark.xfail(
    reason='Unknown reason. xfailing to restore the CI for udf tests. #2358'
)
def test_mixed_inputs(udfcon, alltypes, test_data_db, udf_ll):
    # A two-int32 UDF must accept any mix of columns and scalars.
    name = 'two_args'
    symbol = 'TwoArgs'
    inputs = ['int32', 'int32']
    output = 'int32'
    func = udf_creation_to_op(
        udf_ll, udfcon, test_data_db, name, symbol, inputs, output
    )

    expr = func(alltypes.int_col, 1)
    assert issubclass(type(expr), ir.ColumnExpr)
    udfcon.execute(expr)

    expr = func(1, alltypes.int_col)
    assert issubclass(type(expr), ir.ColumnExpr)
    udfcon.execute(expr)

    expr = func(alltypes.int_col, alltypes.tinyint_col)
    udfcon.execute(expr)
@pytest.mark.xfail(
    reason='Unknown reason. xfailing to restore the CI for udf tests. #2358'
)
def test_implicit_typecasting(udfcon, alltypes, test_data_db, udf_ll):
    # An int8 column should be implicitly widened to the UDF's int32 input.
    col = alltypes.tinyint_col
    literal = ibis.literal(1000)
    identity_func_testing(udf_ll, udfcon, test_data_db, 'int32', literal, col)
def identity_func_testing(
    udf_ll, udfcon, test_data_db, datatype, literal, column
):
    # Register an Identity UDF for `datatype` and check it round-trips both
    # a literal (scalar expr) and a column (column expr).
    inputs = [datatype]
    name = '__tmp_udf_' + util.guid()
    func = udf_creation_to_op(
        udf_ll, udfcon, test_data_db, name, 'Identity', inputs, datatype
    )

    expr = func(literal)
    assert issubclass(type(expr), ir.ScalarExpr)
    result = udfcon.execute(expr)
    # Hacky
    if datatype == 'timestamp':
        assert type(result) == pd.Timestamp
    else:
        lop = literal.op()
        if isinstance(lop, ir.Literal):
            # NOTE(review): this compares the literal's *input* value against
            # the constant 5, not against `result` -- looks suspicious; verify
            # the intended check.
            np.testing.assert_allclose(lop.value, 5)
        else:
            # NOTE(review): the third positional argument of assert_allclose
            # is rtol; passing 5 makes this an extremely loose comparison --
            # confirm that is intentional.
            np.testing.assert_allclose(result, udfcon.execute(literal), 5)

    expr = func(column)
    assert issubclass(type(expr), ir.ColumnExpr)
    udfcon.execute(expr)
@pytest.mark.xfail(
    reason='Unknown reason. xfailing to restore the CI for udf tests. #2358'
)
def test_mult_type_args(udfcon, alltypes, test_data_db, udf_ll):
    # UDF taking (almost) every primitive type; AlmostAllTypes returns the
    # number of arguments it received (8 here).
    symbol = 'AlmostAllTypes'
    name = 'most_types'
    inputs = [
        'string',
        'boolean',
        'int8',
        'int16',
        'int32',
        'int64',
        'float',
        'double',
    ]
    output = 'int32'

    func = udf_creation_to_op(
        udf_ll, udfcon, test_data_db, name, symbol, inputs, output
    )

    expr = func('a', True, 1, 1, 1, 1, 1.0, 1.0)
    result = udfcon.execute(expr)
    assert result == 8

    table = alltypes
    expr = func(
        table.string_col,
        table.bool_col,
        table.tinyint_col,
        table.tinyint_col,
        table.smallint_col,
        table.smallint_col,
        1.0,
        1.0,
    )
    udfcon.execute(expr)
def test_all_type_args(udfcon, test_data_db, udf_ll):
    # Like test_mult_type_args but including decimal; currently broken and
    # skipped unconditionally.
    pytest.skip('failing test, to be fixed later')

    symbol = 'AllTypes'
    name = 'all_types'
    inputs = [
        'string',
        'boolean',
        'int8',
        'int16',
        'int32',
        'int64',
        'float',
        'double',
        'decimal',
    ]
    output = 'int32'
    func = udf_creation_to_op(
        udf_ll, udfcon, test_data_db, name, symbol, inputs, output
    )
    expr = func('a', True, 1, 1, 1, 1, 1.0, 1.0, 1.0)
    result = udfcon.execute(expr)
    assert result == 9
@pytest.mark.xfail(
    reason='Unknown reason. xfailing to restore the CI for udf tests. #2358'
)
def test_udf_varargs(udfcon, alltypes, udf_ll, test_data_db):
    # A variadic double UDF wrapped with a varargs input signature.
    t = alltypes

    name = f'add_numbers_{util.guid()[:4]}'

    input_sig = rules.varargs(rules.double)
    func = api.wrap_udf(udf_ll, input_sig, 'double', 'AddNumbers', name=name)
    func.register(name, test_data_db)
    udfcon.create_function(func, database=test_data_db)

    expr = func(t.double_col, t.double_col)
    expr.execute()
def test_drop_udf_not_exists(udfcon):
    # Dropping a nonexistent UDF must raise.
    random_name = util.guid()
    with pytest.raises(Exception):
        udfcon.drop_udf(random_name)
def test_drop_uda_not_exists(udfcon):
    # Dropping a nonexistent UDA must raise.
    random_name = util.guid()
    with pytest.raises(Exception):
        udfcon.drop_uda(random_name)
def udf_creation_to_op(
    udf_ll, udfcon, test_data_db, name, symbol, inputs, output
):
    # Wrap the library symbol, create the function on the cluster, register
    # it with ibis, and return the callable op.
    func = api.wrap_udf(udf_ll, inputs, output, symbol, name)

    # self.temp_udfs.append((name, inputs))

    udfcon.create_function(func, database=test_data_db)

    func.register(name, test_data_db)

    assert udfcon.exists_udf(name, test_data_db)
    return func
def test_ll_uda_not_supported(uda_ll):
    # LLVM IR UDAs are not supported as of Impala 2.2
    with pytest.raises(com.IbisError):
        conforming_wrapper(uda_ll, ['double'], 'double', 'Variance')
def conforming_wrapper(
    where, inputs, output, prefix, serialize=True, name=None
):
    # Wrap a UDA whose entry points follow the `<prefix><Stage>` naming
    # convention (Init/Update/Merge/[Serialize]/Finalize).
    kwds = {'name': name}
    if serialize:
        kwds['serialize_fn'] = f'{prefix}Serialize'
    return api.wrap_uda(
        where,
        inputs,
        output,
        f'{prefix}Update',
        init_fn=f'{prefix}Init',
        merge_fn=f'{prefix}Merge',
        finalize_fn=f'{prefix}Finalize',
        **kwds,
    )
@pytest.fixture
def wrapped_count_uda(uda_so):
    # COUNT-style UDA from the sample shared object, under a unique name so
    # parallel/repeat runs don't collide.
    name = f'user_count_{util.guid()}'
    return api.wrap_uda(uda_so, ['int32'], 'int64', 'CountUpdate', name=name)
def test_count_uda(udfcon, alltypes, test_data_db, wrapped_count_uda):
    # Registering and invoking the sample count UDA should succeed end-to-end.
    func = wrapped_count_uda
    func.register(func.name, test_data_db)
    udfcon.create_function(func, database=test_data_db)

    # it works!
    func(alltypes.int_col).execute()
    # self.temp_udas.append((func.name, ['int32']))
def test_list_udas(udfcon, temp_database, wrapped_count_uda):
    """``list_udas`` should report the UDA we just created in that database."""
    created = wrapped_count_uda
    udfcon.create_function(created, database=temp_database)

    listed = udfcon.list_udas(database=temp_database)
    first = listed[0]
    # Name, signature, and return type must all round-trip.
    assert first.name == created.name
    assert first.inputs == created.inputs
    assert first.output == created.output
@pytest.mark.xfail(
    reason='Unknown reason. xfailing to restore the CI for udf tests. #2358'
)
def test_drop_database_with_udfs_and_udas(
    udfcon, temp_database, wrapped_count_uda, udf_ll
):
    """A database containing both a UDF and a UDA can still be dropped.

    Bug fix: the original signature omitted the ``udf_ll`` fixture even
    though the body references it, so the test raised ``NameError`` instead
    of exercising the intended behavior. pytest injects fixtures by argument
    name, so requesting ``udf_ll`` here restores the missing binding.
    """
    uda1 = wrapped_count_uda
    udf1 = api.wrap_udf(
        udf_ll,
        ['boolean'],
        'boolean',
        'Identity',
        f'udf_{util.guid()}',
    )
    db = temp_database
    udfcon.create_database(db)

    udfcon.create_function(uda1, database=db)
    udfcon.create_function(udf1, database=db)
    # drop happens in test tear down
class TestUDFDDL(unittest.TestCase):
    """Compile-only checks for the Impala UDF/UDA DDL statement builders."""

    def setUp(self):
        # Shared fixtures: a mock backend plus a canonical two-string
        # signature returning int64.
        self.con = MockBackend()
        self.name = 'test_name'
        self.inputs = ['string', 'string']
        self.output = 'int64'

    def test_create_udf(self):
        wrapped = api.wrap_udf(
            '/foo/bar.so',
            self.inputs,
            self.output,
            so_symbol='testFunc',
            name=self.name,
        )
        compiled = ddl.CreateUDF(wrapped).compile()
        assert compiled == (
            "CREATE FUNCTION `test_name`(string, string) "
            "returns bigint "
            "location '/foo/bar.so' symbol='testFunc'"
        )

    def test_create_udf_type_conversions(self):
        # Ibis type names should be rendered as Impala SQL type names
        # (int8 -> tinyint, int16 -> smallint, int32 -> int).
        wrapped = api.wrap_udf(
            '/foo/bar.so',
            ['string', 'int8', 'int16', 'int32'],
            self.output,
            so_symbol='testFunc',
            name=self.name,
        )
        compiled = ddl.CreateUDF(wrapped).compile()
        assert compiled == (
            "CREATE FUNCTION `test_name`(string, tinyint, "
            "smallint, int) returns bigint "
            "location '/foo/bar.so' symbol='testFunc'"
        )

    def test_delete_udf_simple(self):
        statement = ddl.DropFunction(self.name, self.inputs)
        assert statement.compile() == (
            "DROP FUNCTION `test_name`(string, string)"
        )

    def test_delete_udf_if_exists(self):
        statement = ddl.DropFunction(self.name, self.inputs, must_exist=False)
        assert statement.compile() == (
            "DROP FUNCTION IF EXISTS `test_name`(string, string)"
        )

    def test_delete_udf_aggregate(self):
        statement = ddl.DropFunction(self.name, self.inputs, aggregate=True)
        assert statement.compile() == (
            "DROP AGGREGATE FUNCTION `test_name`(string, string)"
        )

    def test_delete_udf_db(self):
        statement = ddl.DropFunction(self.name, self.inputs, database='test')
        assert statement.compile() == (
            "DROP FUNCTION test.`test_name`(string, string)"
        )

    def test_create_uda(self):
        def expected_sql(with_serialize):
            # The serialize_fn clause only appears when a serialize
            # function was supplied to wrap_uda.
            clauses = [
                (
                    "CREATE AGGREGATE FUNCTION "
                    "bar.`test_name`(string, string)"
                    " returns bigint location '/foo/bar.so'"
                ),
                "init_fn='Init'",
                "update_fn='Update'",
                "merge_fn='Merge'",
            ]
            if with_serialize:
                clauses.append("serialize_fn='Serialize'")
            clauses.append("finalize_fn='Finalize'")
            return '\n'.join(clauses)

        # Exercise both the serialized and non-serialized variants.
        for with_serialize in (True, False):
            wrapped = api.wrap_uda(
                '/foo/bar.so',
                self.inputs,
                self.output,
                update_fn='Update',
                init_fn='Init',
                merge_fn='Merge',
                finalize_fn='Finalize',
                serialize_fn='Serialize' if with_serialize else None,
            )
            statement = ddl.CreateUDA(wrapped, name=self.name, database='bar')
            assert statement.compile() == expected_sql(with_serialize)

    def test_list_udf(self):
        statement = ddl.ListFunction('test')
        assert statement.compile() == 'SHOW FUNCTIONS IN test'

    def test_list_udfs_like(self):
        statement = ddl.ListFunction('test', like='identity')
        assert statement.compile() == (
            "SHOW FUNCTIONS IN test LIKE 'identity'"
        )

    def test_list_udafs(self):
        statement = ddl.ListFunction('test', aggregate=True)
        assert statement.compile() == 'SHOW AGGREGATE FUNCTIONS IN test'

    def test_list_udafs_like(self):
        statement = ddl.ListFunction('test', like='identity', aggregate=True)
        assert statement.compile() == (
            "SHOW AGGREGATE FUNCTIONS IN test LIKE 'identity'"
        )
|
{
"content_hash": "3d7df9fc2f1f8242c478cf530e8d68b0",
"timestamp": "",
"source": "github",
"line_count": 634,
"max_line_length": 78,
"avg_line_length": 29.518927444794954,
"alnum_prop": 0.5683141864814321,
"repo_name": "cloudera/ibis",
"id": "bba4da5a62c0fbda9202a2bba7d952f13199696e",
"size": "18715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibis/backends/impala/tests/test_udf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "44943"
},
{
"name": "CMake",
"bytes": "4383"
},
{
"name": "Python",
"bytes": "2570944"
},
{
"name": "Shell",
"bytes": "1989"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.