hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5b8dec39e4dcffcbe21b052a1d8080631863cd63 | 3,720 | py | Python | examples/examples/usage.py | alexa984/django-enum-choices | 2eb9f1faea2133d65350a3f05f59d6ba72050925 | [
"MIT"
] | null | null | null | examples/examples/usage.py | alexa984/django-enum-choices | 2eb9f1faea2133d65350a3f05f59d6ba72050925 | [
"MIT"
] | null | null | null | examples/examples/usage.py | alexa984/django-enum-choices | 2eb9f1faea2133d65350a3f05f59d6ba72050925 | [
"MIT"
] | null | null | null | from .enumerations import MyEnum
from .models import MyModel, MyModelMultiple
from .serializers import (
MySerializer,
MyModelSerializer,
ImplicitMyModelSerializer,
MultipleMySerializer,
ImplicitMultipleMyModelSerializer,
CustomChoiceBuilderSerializer
)
from .forms import StandardEnumForm, ModelEnumForm, CustomChoiceBuilderEnumForm
# Object Creation
def create_instance():
    """Create and return a ``MyModel`` row whose enum field is ``MyEnum.A``."""
    created = MyModel.objects.create(enumerated_field=MyEnum.A)
    return created
# Overriding field value
def update_instance():
    """Create an instance, then switch its enum field to ``MyEnum.B``."""
    obj = create_instance()
    obj.enumerated_field = MyEnum.B
    obj.save()
    return obj
# Object creation with multiple field
def create_instance_with_multiple_field():
    """Create a ``MyModelMultiple`` row holding both enum members."""
    values = [MyEnum.A, MyEnum.B]
    return MyModelMultiple.objects.create(enumerated_field=values)
# Overriding multiple field value
def update_instance_with_multiple_field():
    """Replace the multiple-choice value with just ``MyEnum.B``."""
    obj = create_instance_with_multiple_field()
    obj.enumerated_field = [MyEnum.B]
    obj.save()
    return obj
# QuerySet filtering
def filter_queryset():
    """Return the queryset of rows whose enum field equals ``MyEnum.A``."""
    matching = MyModel.objects.filter(enumerated_field=MyEnum.A)
    return matching
# Serializer usage
def serialize_value():
    """Serialize an in-memory dict whose enum field holds ``MyEnum.A``."""
    serializer = MySerializer({'enumerated_field': MyEnum.A})
    return serializer.data
def deserialize_value():
    """Validate the raw value 'a' and return the deserialized data."""
    serializer = MySerializer(data={'enumerated_field': 'a'})
    serializer.is_valid()
    return serializer.validated_data
# Explicit ModelSerializer usage
def serialize_model_from_explicit_serializer():
    """Serialize a freshly created ``MyModel`` instance."""
    return MyModelSerializer(create_instance()).data
def create_model_from_explicit_serializer():
    """Deserialize raw input and persist a new ``MyModel`` row."""
    serializer = MyModelSerializer(data={'enumerated_field': 'a'})
    serializer.is_valid()
    return serializer.save()
# Implicit ModelSerializer usage
def serialize_model_from_implicit_serializer():
    """Serialize a fresh instance with the auto-generated field serializer."""
    return ImplicitMyModelSerializer(create_instance()).data
def create_model_from_implicit_serializer():
    """Deserialize raw input and persist a row via the implicit serializer."""
    serializer = ImplicitMyModelSerializer(data={'enumerated_field': 'a'})
    serializer.is_valid()
    return serializer.save()
# Multiple Standard Serializer Usage
def serialize_multiple_value():
    """Serialize a dict carrying several enum members at once."""
    serializer = MultipleMySerializer({'enumerated_field': [MyEnum.A, MyEnum.B]})
    return serializer.data
def deserialize_multiple_value():
    """Validate a list of raw values and return the deserialized data."""
    serializer = MultipleMySerializer(data={'enumerated_field': ['a', 'b']})
    serializer.is_valid()
    return serializer.validated_data
# Implicit Multiple ModelSerializer usage
def serialize_model_from_multiple_field_serializer():
    """Serialize a freshly created ``MyModelMultiple`` instance."""
    instance = create_instance_with_multiple_field()
    return ImplicitMultipleMyModelSerializer(instance).data
def create_model_from_multiple_field_serializer():
    """Deserialize a list of raw values and persist a new row."""
    serializer = ImplicitMultipleMyModelSerializer(data={'enumerated_field': ['a', 'b']})
    serializer.is_valid()
    return serializer.save()
def serialize_with_custom_choice_builder():
    """Serialize using the serializer that installs a custom choice builder."""
    serializer = CustomChoiceBuilderSerializer({'enumerated_field': MyEnum.A})
    return serializer.data
def get_value_from_standard_form():
    """Validate a plain Django form and return its cleaned data."""
    form = StandardEnumForm({'enumerated_field': 'a'})
    form.is_valid()
    return form.cleaned_data
def create_instance_from_model_form():
    """Validate a model form and persist the resulting instance."""
    form = ModelEnumForm({'enumerated_field': 'a'})
    form.is_valid()
    return form.save(commit=True)
def get_value_from_form_with_custom_choice_builder_field():
    """Validate a form whose field uses a custom choice builder."""
    form = CustomChoiceBuilderEnumForm({'enumerated_field': 'Custom_a'})
    form.is_valid()
    return form.cleaned_data
| 21.627907 | 80 | 0.735753 |
32ef7c3230ea0f500f3ff52337125416351c1d35 | 3,865 | py | Python | mmtbx/building/alternate_conformations/scripts/flatten_map_outside_selection.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/building/alternate_conformations/scripts/flatten_map_outside_selection.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/building/alternate_conformations/scripts/flatten_map_outside_selection.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null |
"""
For the obsessive-compulsive who absolutely must have perfectly contoured
density around their ligand. Handy for making figures, but prone to abuse,
which is why it's not in the command_line directory.
"""
from __future__ import division
import sys
def flatten_map(map, xray_structure, selection):
  """Zero every map grid point outside a shell around the selected atoms.

  Hydrogen/deuterium sites get a 1.0 radius, all other atoms 1.5 (the
  units follow grid_indices_around_sites' site_radii argument).  The map
  is modified in place and also returned.  Note: the ``map`` parameter
  shadows the builtin of the same name; kept for interface compatibility.
  """
  from cctbx import maptbx
  from scitbx.array_family import flex
  selected_sites = xray_structure.sites_cart().select(selection)
  is_hd = xray_structure.hd_selection()
  site_radii = flex.double()
  for i_seq in selection:
    site_radii.append(1.0 if is_hd[i_seq] else 1.5)
  keep_sel = maptbx.grid_indices_around_sites(
    unit_cell=xray_structure.unit_cell(),
    fft_n_real=map.focus(),
    fft_m_real=map.all(),
    sites_cart=selected_sites,
    site_radii=site_radii)
  # Everything NOT in the shell around the selection gets zeroed.
  outside = flex.bool(map.size(), True)
  outside.set_selected(keep_sel, False)
  map.as_1d().set_selected(outside, 0)
  return map
def run(args, out=sys.stdout):
  """Command-line entry point: FFT each complex (map-coefficient) array in
  the input MTZ file, zero all density outside the selected atoms, and
  write one CCP4 map per array (optionally plus a PyMOL script).
  """
  master_phil_str = """
map_coeffs = None
  .type = path
model = None
  .type = path
selection = all
  .type = atom_selection
prefix = flattened
  .type = str
write_pymol = False
  .type = bool
"""
  import iotbx.phil
  cmdline = iotbx.phil.process_command_line_with_files(
    args=args,
    master_phil_string=master_phil_str,
    reflection_file_def="map_coeffs",
    pdb_file_def="model",
    usage_string="""\
phenix.flatten_map_outside_selection model.pdb maps.mtz selection='chain A'
For each set of map coefficients in the input MTZ file, performs an FFT to
obtain the electron density, and sets all grid points outside the defined
atom selection to zero. Used for generating figures in PyMOL.""")
  params = cmdline.work.extract()
  assert (not None in [params.model, params.map_coeffs, params.selection])
  from iotbx import file_reader
  pdb_in = file_reader.any_file(params.model, force_type="pdb")
  hierarchy = pdb_in.file_object.construct_hierarchy()
  xrs = pdb_in.file_object.xray_structure_simple()
  sites_cart = xrs.sites_cart()
  # Atom selection defining the region whose density is kept.
  sel = hierarchy.atom_selection_cache().selection(params.selection)
  isel = sel.iselection()
  mtz_in = file_reader.any_file(params.map_coeffs, force_type="hkl")
  two_fofc = fofc = None  # remember output file names for the PyMOL script
  for miller_array in mtz_in.file_server.miller_arrays :
    if (miller_array.is_complex_array()):
      label = miller_array.info().labels[0]
      # Skip F-model arrays; only the experimental-style maps are wanted.
      if (label.startswith("F-model")) : continue
      real_map = miller_array.fft_map(resolution_factor=0.25
        ).apply_sigma_scaling().real_map_unpadded()
      # Zero all density outside the selected atoms (modifies real_map).
      flatten_map(
        map=real_map,
        xray_structure=xrs,
        selection=isel)
      import iotbx.map_tools
      file_name = params.prefix + "_%s.ccp4" % label
      iotbx.map_tools.write_ccp4_map(
        sites_cart=sites_cart,
        unit_cell=xrs.unit_cell(),
        map_data=real_map,
        n_real=real_map.focus(),
        file_name=file_name,
        buffer=5)
      print "wrote %s" % file_name
      if (label == "2FOFCWT"):
        two_fofc = file_name
      elif (label == "FOFCWT"):
        fofc = file_name
  if (params.write_pymol):
    # Emit a PyMOL script that loads the model and contours the new maps.
    f = open(params.prefix + ".pml", "w")
    f.write("""set normalize_ccp4_maps, 0\n""")
    f.write("""load %s\n""" % params.model)
    f.write("""show sticks, (%s) and not elem H\n""" % params.selection)
    f.write("""color yellow, elem C\n""")
    f.write("""hide lines\n""")
    f.write("""hide nonbonded\n""")
    f.write("""zoom %s\n""" % params.selection)
    if (two_fofc is not None):
      f.write("""load %s, 2fofc\n""" % two_fofc)
      f.write("""isomesh m1, 2fofc, 1.0, %s, 5\n""" % params.selection)
      f.write("""color grey50, m1\n""")
    if (fofc is not None):
      f.write("""load %s, fofc\n""" % fofc)
      f.write("""isomesh m2, fofc, 3.0, %s, 5\n""" % params.selection)
      f.write("""color green, m2\n""")
    f.close()
# Allow the module to be run as a standalone script.
if (__name__ == "__main__"):
  run(sys.argv[1:])
| 33.903509 | 75 | 0.674774 |
10779cb6aa8890398d873218aa3258d504a0f5a0 | 2,307 | py | Python | tests/test_0674-categorical-validation.py | colesbury/awkward-1.0 | d036ab18eb54de8a2571d9f179d315ac8ee22119 | [
"BSD-3-Clause"
] | null | null | null | tests/test_0674-categorical-validation.py | colesbury/awkward-1.0 | d036ab18eb54de8a2571d9f179d315ac8ee22119 | [
"BSD-3-Clause"
] | null | null | null | tests/test_0674-categorical-validation.py | colesbury/awkward-1.0 | d036ab18eb54de8a2571d9f179d315ac8ee22119 | [
"BSD-3-Clause"
] | null | null | null | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
pyarrow = pytest.importorskip("pyarrow")
def test_categorical_is_valid():
    # validate a categorical array by its content
    years = ak.Array([2019, 2020, 2021, 2020, 2019])
    assert ak.is_valid(ak.to_categorical(years))
def test_optional_categorical_from_arrow():
    # A DictionaryArray -- with or without missing indices -- must come
    # through ak.from_arrow carrying the "categorical" array parameter.
    dictionary = pyarrow.array([2019, 2020, 2021])
    plain = pyarrow.DictionaryArray.from_arrays(
        pyarrow.array([0, 1, 0, 1, 2, 0, 2]), dictionary
    )
    assert ak.from_arrow(plain).layout.parameter("__array__") == "categorical"
    masked = pyarrow.DictionaryArray.from_arrays(
        pyarrow.array([0, 1, 0, 1, 2, None, 0, 2]), dictionary
    )
    assert ak.from_arrow(masked).layout.parameter("__array__") == "categorical"
def test_categorical_from_arrow_ChunkedArray():
    # Dictionary-encoded columns must survive chunked Tables, including
    # chunks whose dictionaries differ.
    idx_a = [0, 1, 0, 1, 2, 0, 2]
    idx_b = [0, 1, 0, 1, 0]
    batch_a = pyarrow.RecordBatch.from_arrays(
        [
            pyarrow.DictionaryArray.from_arrays(
                pyarrow.array(idx_a), pyarrow.array([2019, 2020, 2021])
            )
        ],
        ["year"],
    )
    batch_b = pyarrow.RecordBatch.from_arrays(
        [
            pyarrow.DictionaryArray.from_arrays(
                pyarrow.array(idx_b), pyarrow.array([2019, 2020])
            )
        ],
        ["year"],
    )
    uniform = ak.from_arrow(pyarrow.Table.from_batches([batch_a] * 3))
    mixed = ak.from_arrow(pyarrow.Table.from_batches([batch_a, batch_b]))
    assert np.asarray(uniform.layout.field(0).content.index).tolist() == idx_a * 3
    assert (
        np.asarray(mixed.layout.field(0).content.index).tolist() == idx_a + idx_b
    )
| 35.492308 | 88 | 0.724317 |
0137994cd83185a8460d0d88709255369b9509b0 | 12,320 | py | Python | py3/nn/experiments/wgan/wgan_mnist_fc.py | fr42k/gap-wgan-gp | 4e373c43d606a1b83f76893d93f9cf8be8cd460d | [
"MIT"
] | null | null | null | py3/nn/experiments/wgan/wgan_mnist_fc.py | fr42k/gap-wgan-gp | 4e373c43d606a1b83f76893d93f9cf8be8cd460d | [
"MIT"
] | null | null | null | py3/nn/experiments/wgan/wgan_mnist_fc.py | fr42k/gap-wgan-gp | 4e373c43d606a1b83f76893d93f9cf8be8cd460d | [
"MIT"
] | null | null | null | """Generative Adversarial Network for MNIST."""
import os, sys
sys.path.append(os.getcwd())
try: # This only matters on Ishaan's computer
import experiment_tools
experiment_tools.wait_for_gpu(tf=True)
except ImportError:
pass
import tflib as lib
import tflib.debug
import tflib.ops.linear
import tflib.ops.conv2d
import tflib.save_images
import tflib.mnist
import numpy as np
import tensorflow as tf
import sklearn.datasets
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import time
import functools
# Training hyperparameters.
BATCH_SIZE = 100  # samples per minibatch
ITERS = 100000  # total training iterations
DIM = 16  # base channel width (referenced only by the commented-out conv discriminator)
DIM_G = 64  # base channel width (referenced only by the commented-out conv generator)
def LeakyReLU(x, alpha=0.25):
    """Leaky ReLU activation: elementwise max(alpha * x, x)."""
    scaled = alpha * x
    return tf.maximum(scaled, x)
def GenLayer(name, n_in, n_out, inputs, alpha=0.25):
    """Gated linear generator layer: project to 2*n_out, then
    sigmoid(first half) * second half.  ``alpha`` is accepted for
    signature parity with the other layer helpers but is unused.
    """
    projected = lib.ops.linear.Linear(name + '.Linear', n_in, 2 * n_out, inputs)
    # Legacy tf.split(split_dim, num_splits, value) argument ordering.
    gate, value = tf.split(1, 2, projected)
    return tf.nn.sigmoid(gate) * value
def DiscLayer(name, n_in, n_out, inputs, alpha=0.25):
    """tanh-activated linear critic layer (``alpha`` is unused)."""
    pre_activation = lib.ops.linear.Linear(name + '.Linear', n_in, n_out, inputs)
    return tf.tanh(pre_activation)
def DiscLayer2(name, n_in, n_out, inputs, alpha=0.25):
    """ReLU-activated linear layer with He initialization (``alpha`` unused)."""
    pre_activation = lib.ops.linear.Linear(
        name + '.Linear', n_in, n_out, inputs, initialization='he')
    return tf.nn.relu(pre_activation)
def FCGenerator(n_samples):
    """Fully connected generator: 128-dim uniform noise -> 784-dim image
    in (0, 1) via three gated layers and a sigmoid output."""
    noise = tf.random_uniform(
        shape=[n_samples, 128],
        minval=-np.sqrt(3),
        maxval=np.sqrt(3)
    )
    hidden = noise
    for index, (fan_in, fan_out) in enumerate([(128, 512), (512, 512), (512, 512)]):
        hidden = GenLayer('Generator.%d' % (index + 1), fan_in, fan_out, hidden)
    logits = lib.ops.linear.Linear('Generator.Out', 512, 784, hidden)
    return tf.nn.sigmoid(logits)
def FCDiscriminator(inputs, name='Discriminator'):
    """Fully connected critic: 784-dim input -> one scalar score per sample."""
    hidden = inputs
    for index, (fan_in, fan_out) in enumerate([(784, 512), (512, 512), (512, 512)]):
        hidden = DiscLayer(name + '.%d' % (index + 1), fan_in, fan_out, hidden)
    score = lib.ops.linear.Linear(name + '.Out', 512, 1, hidden)
    return tf.reshape(score, [-1])
def FCDiscriminator2(inputs, name='Discriminator2'):
    """Second critic: identical architecture to FCDiscriminator but with
    its own (differently named, independent) parameters."""
    hidden = inputs
    for index, (fan_in, fan_out) in enumerate([(784, 512), (512, 512), (512, 512)]):
        hidden = DiscLayer(name + '.%d' % (index + 1), fan_in, fan_out, hidden)
    score = lib.ops.linear.Linear(name + '.Out', 512, 1, hidden)
    return tf.reshape(score, [-1])
def FCDiscriminator3(inputs, name='Discriminator3'):
    """Third critic: same shape as the others but ReLU layers with He
    initialization (DiscLayer2) instead of tanh layers."""
    hidden = inputs
    for index, (fan_in, fan_out) in enumerate([(784, 512), (512, 512), (512, 512)]):
        hidden = DiscLayer2(name + '.%d' % (index + 1), fan_in, fan_out, hidden)
    score = lib.ops.linear.Linear(name + '.Out', 512, 1, hidden)
    return tf.reshape(score, [-1])
# def SubpixelConv2D(*args, **kwargs):
# kwargs['output_dim'] = 4*kwargs['output_dim']
# output = lib.ops.conv2d.Conv2D(*args, **kwargs)
# output = tf.transpose(output, [0,2,3,1])
# output = tf.depth_to_space(output, 2)
# output = tf.transpose(output, [0,3,1,2])
# return output
# def ResBlock(name, dim, inputs):
# output = tf.nn.relu(inputs)
# output = lib.ops.conv2d.Conv2D(name+'.1', dim, dim, 3, output)
# output = tf.nn.relu(output)
# output = lib.ops.conv2d.Conv2D(name+'.2', dim, dim, 3, output)
# return output + inputs
# def ResBlockG(name, dim, inputs):
# output = tf.nn.relu(inputs)
# output = lib.ops.conv2d.Conv2D(name+'.1', dim, dim, 3, output)
# output = tf.nn.relu(output)
# output = lib.ops.conv2d.Conv2D(name+'.2', dim, dim, 3, output)
# return output + inputs
# def ResBlockDownsample(name, dim, output_dim, inputs):
# output = tf.nn.relu(inputs)
# output = lib.ops.conv2d.Conv2D(name+'.1', dim, dim, 3, output)
# output = tf.nn.relu(output)
# output = lib.ops.conv2d.Conv2D(name+'.2', dim, output_dim, 3, output, stride=2)
# return output + lib.ops.conv2d.Conv2D(name+'.skip', dim, output_dim, 1, inputs, stride=2)
# def ResBlockUpsample(name, dim, output_dim, inputs):
# output = tf.nn.relu(inputs)
# output = SubpixelConv2D(name+'.1', input_dim=dim, output_dim=output_dim, filter_size=3, inputs=output)
# output = tf.nn.relu(output)
# output = lib.ops.conv2d.Conv2D(name+'.2', output_dim, output_dim, 3, output)
# return output + SubpixelConv2D(name+'.skip', input_dim=dim, output_dim=output_dim, filter_size=1, inputs=inputs)
# def ConvGenerator(n_samples):
# noise = tf.random_uniform(
# shape=[n_samples, 128],
# minval=-np.sqrt(3),
# maxval=np.sqrt(3)
# )
# output = lib.ops.linear.Linear('Generator.Input', 128, 4*4*(8*DIM_G), noise)
# output = tf.reshape(output, [-1, 8*DIM_G, 4, 4])
# output = ResBlockG('Generator.1Pre', 8*DIM_G, output)
# output = ResBlockG('Generator.1', 8*DIM_G, output)
# output = ResBlockUpsample('Generator.2', 8*DIM_G, 4*DIM_G, output)
# output = output[:, :, :7, :7]
# output = ResBlockG('Generator.3', 4*DIM_G, output)
# output = ResBlockG('Generator.4', 4*DIM_G, output)
# output = ResBlockUpsample('Generator.5', 4*DIM_G, 2*DIM_G, output)
# output = ResBlockG('Generator.6', 2*DIM_G, output)
# output = ResBlockG('Generator.7', 2*DIM_G, output)
# output = ResBlockUpsample('Generator.8', 2*DIM_G, DIM_G, output)
# output = ResBlockG('Generator.9', DIM_G, output)
# output = lib.ops.conv2d.Conv2D('Generator.Output', DIM_G, 1, 1, output, he_init=False)
# output = tf.nn.sigmoid(output / 5.)
# output = tf.reshape(output, [-1, 784])
# return output
# def ConvDiscriminator(inputs):
# output = tf.reshape(inputs, [-1, 1, 28, 28])
# output = lib.ops.conv2d.Conv2D('Discriminator.Input', 1, DIM, 1, output)
# output = ResBlock('Discriminator.1', DIM, output)
# output = ResBlockDownsample('Discriminator.2', DIM, 2*DIM, output)
# output = ResBlock('Discriminator.3', 2*DIM, output)
# output = ResBlock('Discriminator.4', 2*DIM, output)
# output = ResBlockDownsample('Discriminator.5', 2*DIM, 4*DIM, output)
# output = ResBlock('Discriminator.6', 4*DIM, output)
# output = ResBlock('Discriminator.7', 4*DIM, output)
# output = ResBlockDownsample('Discriminator.8', 4*DIM, 8*DIM, output)
# output = ResBlock('Discriminator.9', 8*DIM, output)
# # output = ResBlock('Discriminator.9Post', 8*DIM, output)
# output = tf.reshape(output, [-1, 4*4*(8*DIM)])
# output = lib.ops.linear.Linear('Discriminator.Out', 4*4*(8*DIM), 1, output)
# return tf.reshape(output, [-1])
# Select the fully connected architectures (the convolutional variants
# above are commented out).
Generator = FCGenerator
import functools
# NOTE(review): 'functools' is already imported at the top of the file;
# this re-import is redundant but harmless.
Discriminator = FCDiscriminator
Discriminator2 = FCDiscriminator2
Discriminator3 = FCDiscriminator3
# ---- Graph construction ----
# One generator is trained against three independent critics; each critic
# loss is the WGAN loss plus a gradient penalty (WGAN-GP style).
real_data = tf.placeholder(tf.float32, shape=[None, 784])
fake_data = Generator(BATCH_SIZE)
# Score real and fake samples with critic 1 in a single pass, then split.
disc_out = Discriminator(tf.concat(0, [real_data, fake_data]))
disc_real = disc_out[:BATCH_SIZE]
disc_fake = disc_out[BATCH_SIZE:2*BATCH_SIZE]
disc_2_real = Discriminator2(real_data)
disc_2_fake = Discriminator2(fake_data)
disc_3_real = Discriminator3(real_data)
disc_3_fake = Discriminator3(fake_data)
# WGAN generator loss (only uses critic 1's score of fake samples).
gen_cost = -tf.reduce_mean(disc_fake)
# WGAN discriminator loss for each critic.
disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)
disc_2_cost = tf.reduce_mean(disc_2_fake) - tf.reduce_mean(disc_2_real)
disc_3_cost = tf.reduce_mean(disc_3_fake) - tf.reduce_mean(disc_3_real)
# WGAN lipschitz-penalty: penalize the critic's gradient norm at random
# interpolates between real and fake samples.
alpha = tf.random_uniform(
    shape=[BATCH_SIZE,1],
    minval=0.,
    maxval=1.
)
differences = fake_data - real_data
interpolates = real_data + (alpha*differences)
interpolates_batch = interpolates
gradients = tf.gradients(Discriminator(interpolates_batch), [interpolates_batch])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))
lipschitz_penalty = tf.reduce_mean((slopes-1.)**2)
wgan_disc_cost = disc_cost  # keep the raw WGAN loss for logging
disc_cost += 10*lipschitz_penalty
# Rebind to the mean gradient norm for monitoring; the squared penalty
# is already folded into disc_cost above.
lipschitz_penalty = tf.reduce_mean(slopes)
gradients_2 = tf.gradients(Discriminator2(interpolates), [interpolates])[0]
slopes_2 = tf.sqrt(tf.reduce_sum(tf.square(gradients_2), reduction_indices=[1]))
lipschitz_penalty_2 = tf.reduce_mean((slopes_2-1.)**2)
wgan_disc_2_cost = disc_2_cost
disc_2_cost += 10*lipschitz_penalty_2
lipschitz_penalty_2 = tf.reduce_mean(slopes_2)
gradients_3 = tf.gradients(Discriminator3(interpolates), [interpolates])[0]
slopes_3 = tf.sqrt(tf.reduce_sum(tf.square(gradients_3), reduction_indices=[1]))
lipschitz_penalty_3 = tf.reduce_mean((slopes_3-1.)**2)
wgan_disc_3_cost = disc_3_cost
disc_3_cost += 10*lipschitz_penalty_3
lipschitz_penalty_3 = tf.reduce_mean(slopes_3)
# Only build a generator train op if the generator actually has parameters.
if len(lib.params_with_name('Generator')):
    gen_train_op = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(gen_cost, var_list=lib.params_with_name('Generator.'))
    # gen_train_op = tf.train.AdamOptimizer(learning_rate=1e-3, beta1=0.5).minimize(gen_cost, var_list=lib.params_with_name('Generator'))
else:
    gen_train_op = None
disc_train_op = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(disc_cost, var_list=lib.params_with_name('Discriminator.'))
disc_2_train_op = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(disc_2_cost, var_list=lib.params_with_name('Discriminator2.'))
disc_3_train_op = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(disc_3_cost, var_list=lib.params_with_name('Discriminator3.'))
# disc_train_op = tf.train.AdamOptimizer(learning_rate=5e-4, beta1=0.5).minimize(disc_cost, var_list=lib.params_with_name('Discriminator'))
frame_i = [0]  # NOTE(review): appears unused in this file
def generate_image(frame, true_dist):
    """Sample the generator and save the first 100 images to disk.
    ``true_dist`` is accepted for caller compatibility but unused."""
    images = session.run(fake_data)[:100]
    lib.save_images.save_images(images, 'samples_{}.jpg'.format(frame))
# MNIST training-set loader (validation/test generators are discarded).
train_gen, _, _ = lib.mnist.load(BATCH_SIZE, BATCH_SIZE)
def inf_train_gen():
    # Cycle over the MNIST training set forever, yielding image batches
    # only (labels are dropped).
    while True:
        for images,targets in train_gen():
            yield images
with tf.Session() as session:
    session.run(tf.initialize_all_variables())
    def generate_samples(iteration):
        # NOTE(review): 'fake_images' is not defined anywhere in this file
        # (the sampler that actually gets called is generate_image, which
        # uses 'fake_data'); invoking this would raise a NameError.
        samples = session.run(fake_images)
        lib.save_images.save_images(samples.reshape((-1,28,28)), 'samples_{}.jpg'.format(iteration))
    gen = inf_train_gen()
    # Running logs of every cost term, flushed every 100 iterations.
    disc_costs, wgan_disc_costs, lipschitz_penalties, disc_2_costs, wgan_disc_2_costs, lipschitz_penalties_2, disc_3_costs, wgan_disc_3_costs, lipschitz_penalties_3, gen_costs = [], [], [], [], [], [], [], [], [], []
    start_time = time.time()
    for iteration in xrange(ITERS):
        _data = gen.next()
        if iteration % 2 == 0:
            # Critic phase: many critic updates during warm-up, then 5.
            if (iteration < 200):
                disc_iters = 100
            else:
                disc_iters = 5
            for i in xrange(disc_iters):
                # _disc_cost, _wgan_disc_cost, _lipschitz_penalty, _ = session.run([disc_cost, wgan_disc_cost, lipschitz_penalty, disc_train_op], feed_dict={real_data: _data})
                _disc_cost, _wgan_disc_cost, _lipschitz_penalty, _disc_2_cost, _wgan_disc_2_cost, _lipschitz_penalty_2, _disc_3_cost, _wgan_disc_3_cost, _lipschitz_penalty_3, _, _, _ = session.run([disc_cost, wgan_disc_cost, lipschitz_penalty, disc_2_cost, wgan_disc_2_cost, lipschitz_penalty_2, disc_3_cost, wgan_disc_3_cost, lipschitz_penalty_3, disc_train_op, disc_2_train_op, disc_3_train_op], feed_dict={real_data: _data})
                _data = gen.next()
                disc_costs.append(_disc_cost)
                wgan_disc_costs.append(_wgan_disc_cost)
                lipschitz_penalties.append(_lipschitz_penalty)
                disc_2_costs.append(_disc_2_cost)
                wgan_disc_2_costs.append(_wgan_disc_2_cost)
                lipschitz_penalties_2.append(_lipschitz_penalty_2)
                disc_3_costs.append(_disc_3_cost)
                wgan_disc_3_costs.append(_wgan_disc_3_cost)
                lipschitz_penalties_3.append(_lipschitz_penalty_3)
        else:
            # Generator phase (every other iteration).
            if gen_train_op is not None:
                _gen_cost, _ = session.run([gen_cost, gen_train_op], feed_dict={real_data: _data})
                gen_costs.append(_gen_cost)
        if iteration % 100 == 0:
            # Report averaged costs, reset the logs, and dump sample images.
            print "iter:\t{}\tdisc:\t{:.3f}\t{:.3f}\t{:.3f}\tgen:\t{:.3f}\ttime:\t{:.3f}".format(iteration, np.mean(disc_costs), np.mean(disc_2_costs), np.mean(disc_3_costs), np.mean(gen_costs), time.time() - start_time)
            disc_costs, wgan_disc_costs, lipschitz_penalties, disc_2_costs, wgan_disc_2_costs, lipschitz_penalties_2, disc_3_costs, wgan_disc_3_costs, lipschitz_penalties_3, gen_costs = [], [], [], [], [], [], [], [], [], []
            generate_image(iteration, _data)
            start_time = time.time()
2220161f721fd331cfc53582d8139ae98f3bcf04 | 2,154 | py | Python | model/model_esdr.py | liwanjunit/ASRGAN | ac01e546939c435c246fbdce64606464f8fdfc00 | [
"MIT"
] | null | null | null | model/model_esdr.py | liwanjunit/ASRGAN | ac01e546939c435c246fbdce64606464f8fdfc00 | [
"MIT"
] | null | null | null | model/model_esdr.py | liwanjunit/ASRGAN | ac01e546939c435c246fbdce64606464f8fdfc00 | [
"MIT"
] | null | null | null | import math
import torch
from torch import nn
class EDSR(nn.Module):
    """EDSR-style super-resolution network: a conv head, a chain of 16
    residual blocks with a global skip connection, pixel-shuffle
    upsampling, and a conv tail.

    Args:
        scale_factor: overall upscaling factor; must be a power of two
            (each UpsampleBLock doubles the spatial resolution).
        img_range: scale applied to the mean-subtracted input.
        rgb_mean: per-channel mean subtracted from the input
            (presumably DIV2K statistics -- TODO confirm).
    """

    def __init__(self, scale_factor, img_range=255., rgb_mean=(0.4488, 0.4371, 0.4040)):
        upsample_block_num = int(math.log(scale_factor, 2))
        super(EDSR, self).__init__()
        self.img_range = img_range
        self.mean = torch.Tensor(rgb_mean).view(1, 3, 1, 1)
        if torch.cuda.is_available():
            self.mean = self.mean.cuda()
        self.conv_first = nn.Conv2d(3, 64, kernel_size=9, padding=4)
        # BUG FIX: the original code created ONE ResidualBlock and repeated
        # the same instance 16 times, tying the weights of all 16 blocks
        # together.  EDSR uses independent residual blocks, so build 16
        # distinct instances.
        res = [ResidualBlock(64) for _ in range(16)]
        self.residual = res[0]  # kept so the old attribute name still exists
        self.res = nn.Sequential(*res)
        self.conv_after_res = nn.Conv2d(64, 64, kernel_size=3, padding=1)
        upsample = [UpsampleBLock(64, 2) for _ in range(upsample_block_num)]
        self.upsample = nn.Sequential(*upsample)
        self.conv_last = nn.Conv2d(64, 3, kernel_size=9, padding=4)

    def forward(self, x):
        """Map a (N, 3, H, W) batch to (N, 3, H*scale, W*scale)."""
        self.mean = self.mean.type_as(x)
        # Normalize: subtract the channel mean and scale to the working range.
        x = (x - self.mean) * self.img_range
        x = self.conv_first(x)
        res = self.conv_after_res(self.res(x))
        res += x  # global residual (skip) connection
        upsample = self.upsample(res)
        x = self.conv_last(upsample)
        # Undo the normalization on the way out.
        x = x / self.img_range + self.mean
        return x


class ResidualBlock(nn.Module):
    """Two 3x3 convolutions with a ReLU in between and an identity skip
    connection (no batch norm)."""

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.relu = nn.ReLU()

    def forward(self, x):
        residual = self.conv1(x)
        residual = self.relu(residual)
        residual = self.conv2(residual)
        return x + residual


class UpsampleBLock(nn.Module):
    """Sub-pixel upsampling block: a conv expanding channels by
    ``up_scale**2``, then PixelShuffle and a PReLU, increasing the
    spatial resolution by ``up_scale`` while keeping the channel count."""

    def __init__(self, in_channels, up_scale):
        super(UpsampleBLock, self).__init__()
        self.conv = nn.Conv2d(in_channels, in_channels * up_scale ** 2, kernel_size=3, padding=1)
        self.pixel_shuffle = nn.PixelShuffle(up_scale)
        self.prelu = nn.PReLU()

    def forward(self, x):
        x = self.conv(x)
        x = self.pixel_shuffle(x)
        x = self.prelu(x)
        return x
| 31.676471 | 97 | 0.620241 |
2207ed32cc6e975e47c0cce89f1a0a9874d8a5c7 | 7,774 | py | Python | doc/conf.py | nkuttler/flaskwallet | 87fe06fdffd424341305a79e7a61c56254bdd6c9 | [
"BSD-3-Clause"
] | 17 | 2015-01-18T23:24:37.000Z | 2021-06-26T17:31:07.000Z | doc/conf.py | nkuttler/flaskwallet | 87fe06fdffd424341305a79e7a61c56254bdd6c9 | [
"BSD-3-Clause"
] | 1 | 2016-07-29T11:26:37.000Z | 2016-07-29T11:38:14.000Z | doc/conf.py | nkuttler/flaskwallet | 87fe06fdffd424341305a79e7a61c56254bdd6c9 | [
"BSD-3-Clause"
] | 10 | 2015-03-10T06:07:01.000Z | 2021-06-26T17:31:18.000Z | # -*- coding: utf-8 -*-
#
# Flaskwallet documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 17 14:33:26 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flaskwallet'
copyright = u'2013, Nicolas Kuttler'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6'
# The full version, including alpha/beta/rc tags.
# NOTE(review): keep 'version' and 'release' in sync with the package's
# setup metadata when cutting a release.
release = '0.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flaskwalletdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    (
        'index',                       # source start file
        'Flaskwallet.tex',             # target name
        u'Flaskwallet Documentation',  # title
        u'Nicolas Kuttler',            # author
        'manual',                      # documentclass (howto/manual)
    ),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        'index',                       # source start file
        'flaskwallet',                 # name
        u'Flaskwallet Documentation',  # description
        [u'Nicolas Kuttler'],          # authors
        1,                             # manual section
    ),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (
        'index',                             # source start file
        'Flaskwallet',                       # target name
        u'Flaskwallet Documentation',        # title
        u'Nicolas Kuttler',                  # author
        'Flaskwallet',                       # dir menu entry
        'One line description of project.',  # description
        'Miscellaneous',                     # category
    ),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 31.99177 | 80 | 0.715719 |
c15d05c04c1b14e06769e70780eb0a21c32e61fd | 3,029 | py | Python | cleanup.py | dotran/namesilo-letsencrypt | 9d1a5d8875d3253afa18cb0d4c3184fa74d9b248 | [
"BSD-3-Clause"
] | 27 | 2019-06-20T08:39:56.000Z | 2022-03-23T22:48:01.000Z | cleanup.py | dotran/namesilo-letsencrypt | 9d1a5d8875d3253afa18cb0d4c3184fa74d9b248 | [
"BSD-3-Clause"
] | 2 | 2020-03-14T19:49:47.000Z | 2020-06-12T00:33:47.000Z | cleanup.py | dotran/namesilo-letsencrypt | 9d1a5d8875d3253afa18cb0d4c3184fa74d9b248 | [
"BSD-3-Clause"
] | 14 | 2019-06-20T08:40:02.000Z | 2022-03-07T01:25:57.000Z | #!/usr/bin/env python3
# cleanup.py
#
# Copyright (c) 2019-2020, Erik C. Thauvin (erik@thauvin.net)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of this project nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import tempfile
import urllib.request
import tldextract
import untangle
from config import apikey
# Domain certbot is currently validating; certbot exports this variable.
domain = os.environ['CERTBOT_DOMAIN']
# Per-domain scratch directory; the RECORD_ID file read below lives here
# (presumably written by the companion auth hook — confirm).
tmpdir = os.path.join(tempfile.gettempdir(), "CERTBOT_" + domain)
# An environment variable overrides the API key imported from config.py.
if "NAMESILO_API" in os.environ:
    apikey = os.environ['NAMESILO_API']
# Reduce e.g. "sub.example.com" to the registrable domain "example.com",
# since Namesilo's API is keyed on the registered domain.
tld = tldextract.extract(domain)
nsdomain = tld.domain + "." + tld.suffix
# Base URL for dnsDeleteRecord; the record id (rrid) is appended per call.
url = "https://www.namesilo.com/api/dnsDeleteRecord\
?version=1&type=xml&key=" + apikey + "&domain=" + nsdomain + "&rrid="
def getrequest(record_id):
    """Build the dnsDeleteRecord API request for a single DNS record id."""
    # Send a browser-style User-Agent header with the request.
    headers = {
        'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:74.0) '
                       'Gecko/20100101 Firefox/74.0')
    }
    return urllib.request.Request(url + record_id, data=None, headers=headers)
# The scratch file holds one Namesilo record id per line (the TXT records
# created for the dns-01 challenge); delete each via the API, then remove it.
idFile = os.path.join(tmpdir, "RECORD_ID")
if os.path.isfile(idFile):
    f = open(idFile, "r")
    for line in f:
        with urllib.request.urlopen(getrequest(line.rstrip())) as response:
            html = response.read()
        xml = untangle.parse(str(html, 'utf-8'))
        # Reply code '300' is treated as success; anything else is reported.
        if xml.namesilo.reply.code.cdata != '300':
            print("{}: {} ({})".format(
                domain,
                xml.namesilo.reply.detail.cdata,
                xml.namesilo.reply.code.cdata), file=sys.stderr)
            # Code '280' is tolerated (presumably "record already gone" —
            # confirm against the Namesilo API docs); other codes abort.
            if xml.namesilo.reply.code.cdata != '280':
                sys.exit(1)
    f.close()
    os.remove(idFile)
| 36.059524 | 80 | 0.690657 |
c73157f297cb593759f6f6026d9cae285aa78547 | 1,432 | py | Python | mrcnn/structs/tensor_container.py | darolt/mask_rcnn | 680e960ddc70ec912c4d7084b7b15c9e3e6632a7 | [
"MIT"
] | 40 | 2018-09-04T15:06:50.000Z | 2021-07-17T13:51:26.000Z | mrcnn/structs/tensor_container.py | conanhung/mask_rcnn-1 | fd594b726a33432d7d8d326bddf35d0093ad90fa | [
"MIT"
] | 6 | 2019-06-06T14:38:48.000Z | 2021-07-29T14:46:47.000Z | mrcnn/structs/tensor_container.py | conanhung/mask_rcnn-1 | fd594b726a33432d7d8d326bddf35d0093ad90fa | [
"MIT"
] | 9 | 2019-04-03T15:55:15.000Z | 2020-05-20T10:24:33.000Z | """
Tensor container is represents a set of tensors.
It is used by other structures to share common logic.
Licensed under The MIT License
Written by Jean Da Rolt
"""
class TensorContainer():
    """Bag of tensors that share bulk operations (to/cpu/numpy/select).

    Attributes whose names start with an underscore are treated as private
    metadata: no bulk operation touches them.  (Previously only ``to()``
    skipped them, so ``cpu()``/``numpy()``/``select()`` crashed on any
    non-tensor private attribute — now all operations are consistent.)
    """

    def _public_items(self):
        """Yield (name, tensor) pairs for the non-private attributes."""
        for key, value in self.__dict__.items():
            if not key.startswith('_'):
                yield key, value

    def to(self, device):  # !pylint: disable=C0103
        """Apply pytorch's to() to all tensors in this container."""
        for key, value in self._public_items():
            self.__dict__[key] = value.to(device)
        return self

    def cpu(self):
        """Apply pytorch's cpu() to all tensors in this container."""
        for key, value in self._public_items():
            self.__dict__[key] = value.cpu()
        return self

    def numpy(self):
        """Apply pytorch's numpy() to all tensors in this container."""
        for key, value in self._public_items():
            self.__dict__[key] = value.numpy()
        return self

    def select(self, keep):
        """Apply the same index selection to all tensors in the container."""
        for key, value in self._public_items():
            self.__dict__[key] = value[keep]
        return self

    def __str__(self):
        return ''.join(' {}: {}'.format(key, tensor.shape)
                       for key, tensor in self._public_items())

    def __len__(self):
        """Length of dim 0 of the first tensor; 0 for an empty container.

        (Previously an empty container returned None, which made len()
        raise TypeError.)
        """
        for _, tensor in self._public_items():
            return tensor.shape[0]
        return 0
| 31.130435 | 71 | 0.592877 |
0bb9d7a7c5b0c8c569ce65bd83b38022d5ac6f2a | 5,967 | py | Python | homeassistant/components/opengarage/cover.py | olbjan/home-assistant-1 | 1adb45f74e96fc5eff137a3727647a7e428e123c | [
"Apache-2.0"
] | 7 | 2019-02-07T14:14:12.000Z | 2019-07-28T06:56:10.000Z | homeassistant/components/opengarage/cover.py | olbjan/home-assistant-1 | 1adb45f74e96fc5eff137a3727647a7e428e123c | [
"Apache-2.0"
] | null | null | null | homeassistant/components/opengarage/cover.py | olbjan/home-assistant-1 | 1adb45f74e96fc5eff137a3727647a7e428e123c | [
"Apache-2.0"
] | null | null | null | """Platform for the opengarage.io cover component."""
import logging
import requests
import voluptuous as vol
from homeassistant.components.cover import (
DEVICE_CLASS_GARAGE,
PLATFORM_SCHEMA,
SUPPORT_CLOSE,
SUPPORT_OPEN,
CoverDevice,
)
from homeassistant.const import (
CONF_COVERS,
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_DISTANCE_SENSOR = "distance_sensor"
ATTR_DOOR_STATE = "door_state"
ATTR_SIGNAL_STRENGTH = "wifi_signal"
CONF_DEVICE_KEY = "device_key"
DEFAULT_NAME = "OpenGarage"
DEFAULT_PORT = 80
STATES_MAP = {0: STATE_CLOSED, 1: STATE_OPEN}
COVER_SCHEMA = vol.Schema(
{
vol.Required(CONF_DEVICE_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_COVERS): cv.schema_with_slug_keys(COVER_SCHEMA)}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the OpenGarage covers."""
    entities = []
    for device_config in config.get(CONF_COVERS).values():
        # Forward only the settings the cover constructor consumes.
        settings = {
            key: device_config.get(key)
            for key in (
                CONF_NAME,
                CONF_HOST,
                CONF_PORT,
                CONF_SSL,
                CONF_VERIFY_SSL,
                CONF_DEVICE_KEY,
            )
        }
        entities.append(OpenGarageCover(settings))
    add_entities(entities, True)
class OpenGarageCover(CoverDevice):
    """Representation of a OpenGarage cover.

    Communicates with the device's local HTTP API (/jc for status,
    /cc for the door-button command).
    """

    def __init__(self, args):
        """Initialize the cover from a dict keyed by the CONF_* constants."""
        self.opengarage_url = (
            f"http{'s' if args[CONF_SSL] else ''}://"
            f"{args[CONF_HOST]}:{args[CONF_PORT]}"
        )
        self._name = args[CONF_NAME]
        self._device_key = args[CONF_DEVICE_KEY]
        self._state = None
        # Remembered so a failed button press can roll the state back.
        self._state_before_move = None
        self._device_state_attributes = {}
        self._available = True
        self._verify_ssl = args[CONF_VERIFY_SSL]

    @property
    def name(self):
        """Return the name of the cover."""
        return self._name

    @property
    def available(self):
        """Return True if entity is available."""
        return self._available

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        return self._device_state_attributes

    @property
    def is_closed(self):
        """Return if the cover is closed (None while state is unknown)."""
        if self._state is None:
            return None
        # While the door is opening it is still reported as closed.
        return self._state in [STATE_CLOSED, STATE_OPENING]

    def close_cover(self, **kwargs):
        """Close the cover."""
        if self._state in [STATE_CLOSED, STATE_CLOSING]:
            return
        self._state_before_move = self._state
        self._state = STATE_CLOSING
        self._push_button()

    def open_cover(self, **kwargs):
        """Open the cover."""
        if self._state in [STATE_OPEN, STATE_OPENING]:
            return
        self._state_before_move = self._state
        self._state = STATE_OPENING
        self._push_button()

    def update(self):
        """Get updated status from API."""
        try:
            # Fix: honor the configured SSL verification setting here too;
            # previously only _push_button passed verify=.
            status = requests.get(
                f"{self.opengarage_url}/jc",
                timeout=10,
                verify=self._verify_ssl,
            ).json()
        except requests.exceptions.RequestException as ex:
            _LOGGER.error(
                "Unable to connect to OpenGarage device: %(reason)s", dict(reason=ex)
            )
            self._available = False
            return

        if self._name is None and status["name"] is not None:
            self._name = status["name"]
        state = STATES_MAP.get(status.get("door"))
        if self._state_before_move is not None:
            # Keep the optimistic OPENING/CLOSING state until the device
            # reports something different from the pre-move state.
            if self._state_before_move != state:
                self._state = state
                self._state_before_move = None
        else:
            self._state = state
        _LOGGER.debug("%s status: %s", self._name, self._state)
        if status.get("rssi") is not None:
            self._device_state_attributes[ATTR_SIGNAL_STRENGTH] = status.get("rssi")
        if status.get("dist") is not None:
            self._device_state_attributes[ATTR_DISTANCE_SENSOR] = status.get("dist")
        if self._state is not None:
            self._device_state_attributes[ATTR_DOOR_STATE] = self._state

        self._available = True

    def _push_button(self):
        """Send the door-button click command to the API."""
        result = -1
        try:
            result = requests.get(
                f"{self.opengarage_url}/cc?dkey={self._device_key}&click=1",
                timeout=10,
                verify=self._verify_ssl,
            ).json()["result"]
        except requests.exceptions.RequestException as ex:
            _LOGGER.error(
                "Unable to connect to OpenGarage device: %(reason)s", dict(reason=ex)
            )

        if result == 1:
            return

        if result == 2:
            _LOGGER.error("Unable to control %s: Device key is incorrect", self._name)
        elif result > 2:
            _LOGGER.error("Unable to control %s: Error code %s", self._name, result)

        # Command failed: restore the state from before the move started.
        self._state = self._state_before_move
        self._state_before_move = None

    @property
    def device_class(self):
        """Return the class of this device, from component DEVICE_CLASSES."""
        return DEVICE_CLASS_GARAGE

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_OPEN | SUPPORT_CLOSE
| 30.443878 | 86 | 0.627283 |
73a712943abfc1738ee31e4062e185c34d7da598 | 1,440 | py | Python | salt/output/yaml_out.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 2 | 2015-09-21T14:13:30.000Z | 2016-02-12T11:33:46.000Z | salt/output/yaml_out.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 1 | 2019-09-06T13:57:28.000Z | 2019-09-06T13:57:28.000Z | salt/output/yaml_out.py | pass-by-value/salt | 2ede44fe54516242e10fe428629d5f5a18e5f7ea | [
"Apache-2.0",
"MIT"
] | 2 | 2017-01-05T16:14:59.000Z | 2019-01-31T23:15:25.000Z | # -*- coding: utf-8 -*-
'''
Display return data in YAML format
==================================
This outputter defaults to printing in YAML block mode for better readability.
Example output::
saltmine:
foo:
bar: baz
dictionary:
abc: 123
def: 456
list:
- Hello
- World
'''
from __future__ import absolute_import
# Import third party libs
import logging
import yaml
# Import salt libs
from salt.utils.yamldumper import OrderedDumper
# Define the module's virtual name
__virtualname__ = 'yaml'
log = logging.getLogger(__name__)
def __virtual__():
    # Salt loader hook: register this outputter under the short name 'yaml'.
    return __virtualname__
def output(data, **kwargs):  # pylint: disable=unused-argument
    '''
    Serialize ``data`` as YAML, honoring the ``output_indent`` option:
    unset or >= 0 uses readable block mode; a negative value collapses
    the output into flow (inline) mode.
    '''
    params = dict(Dumper=OrderedDumper)
    if 'output_indent' not in __opts__:
        # Default: block mode with yaml's standard indentation.
        params['default_flow_style'] = False
    else:
        indent = __opts__['output_indent']
        if indent >= 0:
            # Block mode with a user-chosen indent width.
            params.update(default_flow_style=False, indent=indent)
        else:
            # Negative indent requests compact flow style.
            params.update(default_flow_style=True, indent=0)
    try:
        return yaml.dump(data, **params)
    except Exception as exc:
        import pprint
        log.exception(
            'Exception {0} encountered when trying to serialize {1}'.format(
                exc, pprint.pformat(data)))
| 23.606557 | 86 | 0.622222 |
283c2528669b7de7f4ac93855bc8bfc9ed87c717 | 529 | py | Python | uploads/urls.py | rahulkushwah/appEngineTest | 2717d41cc0413045e42004c1cf93f04030d32891 | [
"MIT"
] | null | null | null | uploads/urls.py | rahulkushwah/appEngineTest | 2717d41cc0413045e42004c1cf93f04030d32891 | [
"MIT"
] | null | null | null | uploads/urls.py | rahulkushwah/appEngineTest | 2717d41cc0413045e42004c1cf93f04030d32891 | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from uploads.core import views
# URL routing for the uploads app.
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^uploads/simple/$', views.simple_upload, name='simple_upload'),
    #url(r'^uploads/form/$', views.model_form_upload, name='model_form_upload'),
    url(r'^admin/', admin.site.urls),
]
# Serve uploaded media through Django itself in development only.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 29.388889 | 80 | 0.73535 |
f5a7f85f0b91cf4c68259a114270ff772ab6d03d | 9,587 | py | Python | word_sense/wordsense.py | logicalfarhad/word-sense-disambiguation | 7929f3a770f7628a4e0ab7c6f76c5964a90bae75 | [
"Apache-2.0"
] | 2 | 2020-04-03T15:38:25.000Z | 2021-02-01T09:36:51.000Z | word_sense/wordsense.py | logicalfarhad/word-sense-disambiguation | 7929f3a770f7628a4e0ab7c6f76c5964a90bae75 | [
"Apache-2.0"
] | null | null | null | word_sense/wordsense.py | logicalfarhad/word-sense-disambiguation | 7929f3a770f7628a4e0ab7c6f76c5964a90bae75 | [
"Apache-2.0"
] | 1 | 2019-04-14T22:06:21.000Z | 2019-04-14T22:06:21.000Z | import nltk
# nltk.download('wordnet')
# nltk.download('punkt')
# nltk.download('averaged_perceptron_tagger')
from nltk.corpus import wordnet as wn
import numpy as np
from numpy import dot
from numpy import average
from numpy.linalg import norm
import os
def load_glove_vectors(glove_file):
    """Load word embeddings from a GloVe-format text file.

    Each line is ``<word> <float> <float> ...``.

    Returns:
        dict mapping word -> numpy vector of floats.
    """
    vectors = {}
    # 'with' guarantees the file handle is closed even if a line fails
    # to parse (the original open/close pair leaked on exceptions).
    with open(glove_file, 'r', encoding="utf-8") as f:
        for line in f:
            word, *values = line.split()
            vectors[word] = np.array([float(val) for val in values])
    return vectors
# A gloss word must be at least this cosine-similar to the candidate word
# for it to contribute to the sense vector (see get_word_sense_vectors).
cosine_sim_threshold = 0.05
# Minimum best-vs-second-best similarity gap before a disambiguated sense
# is allowed to update the running context vector (see find_wn_key).
score_margin_threshold = 0.1
def get_valid_pos_tag(tag):
    """Return True for content-word Penn Treebank tags.

    J* = adjectives, V* = verbs, N* = nouns, R* = adverbs; all other tags
    (determiners, prepositions, ...) are filtered out.
    """
    # str.startswith accepts a tuple of prefixes, so one call suffices.
    return tag.startswith(('J', 'V', 'N', 'R'))
def get_word_sense_vectors(candidate):
    """Build one vector per WordNet sense of `candidate`.

    For each lemma of `candidate`, the sense vector is the average of the
    embedding vectors of content words in its gloss (definition plus
    example sentences), keeping only gloss words whose cosine similarity
    to the candidate's own vector exceeds `cosine_sim_threshold`.

    Returns a dict {wordnet lemma -> sense vector}; senses with no usable
    gloss words are omitted.  Returns None if `candidate` itself is not
    in the embedding vocabulary.
    """
    vectors = {}
    try:
        candidate_vec = glove[candidate]
    except Exception:
        # Candidate word is out of the embedding vocabulary.
        return None
    for sense in wn.lemmas(candidate):
        gloss = [sense.synset().definition()]
        gloss.extend(sense.synset().examples())
        word_vectors = []
        for sentence in gloss:
            tokens = nltk.word_tokenize(sentence)
            pos_tags = nltk.pos_tag(tokens)
            for gloss_pos, tag in pos_tags:
                if get_valid_pos_tag(tag):
                    try:
                        gloss_word_vec = glove[gloss_pos]
                    except Exception:
                        # Gloss word not in the embedding vocabulary.
                        continue
                    cos_sim = dot(gloss_word_vec, candidate_vec) / (norm(gloss_word_vec) * norm(candidate_vec))
                    if cos_sim > cosine_sim_threshold:
                        word_vectors.append(gloss_word_vec)
        if len(word_vectors) == 0:
            # No gloss word passed the similarity filter for this sense.
            continue
        sense_vector = average(word_vectors, 0)
        vectors[sense] = sense_vector
    return vectors
def disambiguate_word_sense(word, context_vector):
    """Pick the sense of `word` whose vector best matches the context.

    Returns [sense, margin] where margin is the cosine-similarity gap
    between the best and second-best senses (0 when there is only one),
    or [None, 0.0] when the word has no sense vectors at all.
    """
    sense_vectors = sense_vectors_collection[word]
    if not sense_vectors:
        return [None, 0.0]
    ctx_norm = norm(context_vector)
    scored = [
        (dot(context_vector, vec) / (ctx_norm * norm(vec)), sense)
        for sense, vec in sense_vectors.items()
    ]
    # Stable sort on the score only (sense objects are not comparable).
    scored.sort(key=lambda pair: pair[0])
    best_score, best_sense = scored[-1]
    runner_up_score = scored[-2][0] if len(scored) > 1 else 0
    # The margin lets callers judge how confident the disambiguation is.
    return [best_sense, best_score - runner_up_score]
# glove = load_glove_vectors('/media/iftekhar/New Volume/Personal/Admission Docs/Germany/RWTH/MI/Lab - AI_Language_Technology/training_nball47634/glove.6B.50d.txt')
# glove = load_glove_vectors('/media/iftekhar/New Volume/Personal/Admission Docs/Germany/RWTH/MI/Lab - AI_Language_Technology/deps.words')
# glove = load_glove_vectors('/media/iftekhar/New Volume/Personal/Admission Docs/Germany/RWTH/MI/Lab - AI_Language_Technology/bow2.words')
# glove = load_glove_vectors('/media/iftekhar/New Volume/Personal/Admission Docs/Germany/RWTH/MI/Lab - AI_Language_Technology/bow5.words')
# Word embeddings loaded once at import time (the deps.words file —
# presumably dependency-based embeddings; confirm the source).
glove = load_glove_vectors('E:/Code/deps.words/deps.words')
# word -> {wordnet lemma -> sense vector}; filled and cleared per sentence
# by find_wn_key, read by disambiguate_word_sense.
sense_vectors_collection = {}
# linkup_key -> list of gold-standard wordnet sense keys (load_annotations).
annotation_results = dict()
def find_wn_key(sentence, lookup_word):
    """Disambiguate `lookup_word` within `sentence` and return its
    WordNet sense key (or "not found" if it cannot be resolved).

    Algorithm: collect embeddings of the sentence's content words, build
    per-word sense vectors, then disambiguate words in ascending order of
    sense count (fewest-senses-first, "S2C"), refining a running context
    vector with each confident disambiguation until `lookup_word` is
    reached.
    """
    sorted_sense_vectors_collection = {}
    pos = []
    pos_vectors = {}
    tokens_input = nltk.word_tokenize(sentence)
    pos_tags_input = nltk.pos_tag(tokens_input)
    # Keep only content words that have an embedding.
    for word, pos_tag in pos_tags_input:
        if get_valid_pos_tag(pos_tag):
            try:
                pos_vectors[word] = glove[word]
                pos.append(word)
            except Exception:
                # Word not in the embedding vocabulary; skip it.
                pass
    # Build the sense vectors for every remaining content word.
    for p in pos:
        sense_vectors = get_word_sense_vectors(p)
        if sense_vectors is None:
            continue
        sense_vectors_collection[p] = sense_vectors
        sorted_sense_vectors_collection[p] = len(sense_vectors)
    # S2C sorting for content word
    sorted_sense_vectors_collection = sorted(sorted_sense_vectors_collection.items(), key=lambda x: x[1])
    # Context vector initialization: average of all content-word vectors.
    context_vec = average(list(pos_vectors.values()), 0)
    wn_key = "not found"
    for w, _ in sorted_sense_vectors_collection:
        disambiguation_results = disambiguate_word_sense(w, context_vec)
        disambiguated_sense = disambiguation_results[0]
        if disambiguated_sense is None:
            continue
        if w == lookup_word:
            wn_key = disambiguated_sense._key
            break
        score_margin = disambiguation_results[1]
        # Only a confident disambiguation may refine the context vector.
        if score_margin > score_margin_threshold:
            pos_vectors[w] = sense_vectors_collection[w][disambiguated_sense]
            context_vec = average(list(pos_vectors.values()), 0)
    # Reset the shared per-sentence cache for the next call.
    sense_vectors_collection.clear()
    return wn_key
def load_annotations(path="E:/Code/hlt2005releasev2/hlt2005releasev2/domainhltGS.tar/gold_standard_clean.txt"):
    """Populate the module-level `annotation_results` dict from the
    gold-standard annotation file.

    Each pipe-separated line contributes its WordNet sense key (field 2)
    to the list for its linkup key (field 1); "unclear" annotations still
    register the linkup key but add no sense key.

    Args:
        path: location of the gold-standard file.  The default preserves
            the previously hard-coded location, so existing callers
            (``load_annotations()``) are unaffected.
    """
    with open(path, 'r', encoding='ISO-8859-1') as f:
        for raw_line in f:
            fields = raw_line.split('|')
            # Malformed / header lines do not carry all four fields.
            if len(fields) < 4:
                continue
            linkup_key = fields[1].strip()
            wn_key = fields[2].strip()
            # Register the linkup key even when every annotation is unclear.
            keylist = annotation_results.setdefault(linkup_key, [])
            if wn_key != "unclear":
                keylist.append(wn_key)
# output_file = open("output_bow2_windows.txt", "w")
# results_file = open("wsd_results_bow2_windows.txt", "w")
# output_file = open("output_bow5_windows.txt", "w")
# results_file = open("wsd_results_bow5_windows.txt", "w")
# output_file = open("output_glove.6B.50d_windows.txt", "w")
# results_file = open("wsd_results_glove.6B.50d_windows.txt", "w")
# Evaluation driver: run WSD over every annotated sentence file and score
# the predictions against the gold standard loaded above.
output_file = open("output_deps_windows.txt", "w")
results_file = open("wsd_results_deps_windows.txt", "w")
load_annotations()
correct_count = 0
invalid_linkup_key_count = 0
total_sentence_count = 0
for dirpath, _, filenames in os.walk("E:/Code/hlt2005releasev2/hlt2005releasev2/domainhltGS.tar/Annotated_Sentences"):
    if len(filenames) == 0:
        continue
    for file in filenames:
        f = open(os.path.join(dirpath, file), 'r', encoding='ISO-8859-1')
        #from itertools import islice
        #for line in islice(f, 1):
        for line in f:
            # Each line: "<linkup_key> <?> <lookup_word> ...?<id> <?> <sentence>"
            split_line = line.split('?')
            metadata_array = split_line[0].split(' ')
            linkup_key = metadata_array[0]
            lookup_word = metadata_array[2]
            sentence = split_line[1].split(' ', 2)[2]
            wn_key = find_wn_key(sentence, lookup_word)
            results_file.write("|" + linkup_key + "| " + wn_key + "\n")
            if linkup_key in annotation_results:
                total_sentence_count += 1
                wn_keylist = annotation_results[linkup_key]
                if len(wn_keylist) > 0:
                    # Score against the majority gold annotation.
                    most_frequent_wn_key = max(set(wn_keylist), key=wn_keylist.count)
                    if most_frequent_wn_key == wn_key:
                        print("correct wsd for", linkup_key, wn_key)
                        output_file.write("correct wsd for " + linkup_key + " " + wn_key + "\n")
                        correct_count += 1
                        print("correct", correct_count, "| total", total_sentence_count)
                        output_file.write("correct " + str(correct_count) + " | total " + str(total_sentence_count) + "\n")
                    else:
                        print("incorrect wsd for", linkup_key, "| found", wn_key, ", correct is", most_frequent_wn_key)
                        output_file.write("incorrect wsd for " + linkup_key + " | found " + wn_key + ", correct is " + most_frequent_wn_key + "\n")
            else:
                invalid_linkup_key_count += 1
                print("linkup key", linkup_key, "not found in gold standard clean dataset")
                output_file.write("linkup key " + linkup_key + " not found in gold standard clean dataset\n")
results_file.close()
print("total invalid linkup key count", invalid_linkup_key_count)
output_file.write("total invalid linkup key count " + str(invalid_linkup_key_count) + "\n")
print("total correct", correct_count)
output_file.write("total correct " + str(correct_count) + "\n")
print("total sentences", total_sentence_count)
output_file.write("total sentences " + str(total_sentence_count) + "\n")
output_file.close()
ff348cd63cc44f7f1dfff63308e5ee46fbea2515 | 34,801 | py | Python | vipe_dataset/keypoint.py | jhong93/vpd | 1ed3e8631c46e078ecb9a7756dba1f1c14aead5b | [
"BSD-3-Clause"
] | 7 | 2021-11-26T01:15:23.000Z | 2022-03-15T10:51:47.000Z | vipe_dataset/keypoint.py | jhong93/vpd | 1ed3e8631c46e078ecb9a7756dba1f1c14aead5b | [
"BSD-3-Clause"
] | 4 | 2022-01-15T09:46:00.000Z | 2022-02-05T07:10:18.000Z | vipe_dataset/keypoint.py | jhong93/vpd | 1ed3e8631c46e078ecb9a7756dba1f1c14aead5b | [
"BSD-3-Clause"
] | 1 | 2021-09-18T16:50:14.000Z | 2021-09-18T16:50:14.000Z | import os
import math
import random
from collections import Counter, defaultdict
import torch
from torch.utils.data import Dataset
import numpy as np
from tqdm import tqdm
from . import human36m, people3d, nba2k, amass
from .util import flip_skeleton_offsets
from .dataset_base import (
D3KeypointDataset, MAX_NEG_SAMPLE_TRIES, is_good_3d_neg_sample,
normalize_3d_offsets, normalize_2d_skeleton, get_3d_features,
NUM_COCO_KEYPOINTS_ORIG)
from util.io import load_pickle, load_gz_json
# Whether extremity keypoints contribute to 3D features / negative-sample
# comparison (see get_3d_features and is_good_3d_neg_sample call sites).
USE_EXTREMITIES = True
# Whether root-direction features are included in get_3d_features.
USE_ROOT_DIRECTIONS = True
# Random-rotation ranges (radians) used by _random_project_3d camera
# augmentation: elevation about x, roll about y.
CAMERA_AUG_ELEVATION_RANGE = (-np.pi / 6, np.pi / 6)
CAMERA_AUG_ROLL_RANGE = (-np.pi / 6, np.pi / 6)
def _random_project_3d(coco_xyz, elevation=None, roll=None):
    """Project 3D COCO joints into a synthetic 2D view from a random camera.

    Applies a uniform random yaw (about z), then optional random elevation
    (about x) and roll (about y) rotations sampled from the given ranges,
    assigns random confidences, and projects onto the x-z plane with z
    flipped to pixel coordinates.
    """
    # Random yaw about the z axis.
    yaw = np.random.uniform(-np.pi, np.pi)
    cos_a, sin_a = math.cos(yaw), math.sin(yaw)
    coco_xyz = coco_xyz.dot(np.array([
        [cos_a, sin_a, 0],
        [-sin_a, cos_a, 0],
        [0, 0, 1],
    ]))
    if elevation is not None:
        # Random elevation about the x axis.
        pitch = np.random.uniform(*elevation)
        cos_b, sin_b = math.cos(pitch), math.sin(pitch)
        coco_xyz = coco_xyz.dot(np.array([
            [1, 0, 0],
            [0, cos_b, sin_b],
            [0, -sin_b, cos_b],
        ]))
    if roll is not None:
        # Random roll about the y axis.
        tilt = np.random.uniform(*roll)
        cos_c, sin_c = math.cos(tilt), math.sin(tilt)
        coco_xyz = coco_xyz.dot(np.array([
            [cos_c, 0, sin_c],
            [0, 1, 0],
            [-sin_c, 0, cos_c],
        ]))
    # Random per-keypoint confidences; eyes/ears (rows 1-4) are zeroed.
    conf = np.random.uniform(0.5, 1, size=NUM_COCO_KEYPOINTS_ORIG)
    conf[1:5] = 0
    # Keep (x, z) and append confidence; negate z so the vertical axis
    # points down, matching pixel coordinates.
    coco_xzc = np.hstack((coco_xyz[:, [0, 2]], conf[:, None]))
    coco_xzc[:, 1] *= -1
    assert coco_xzc.shape == (NUM_COCO_KEYPOINTS_ORIG, 3)
    return coco_xzc
def _sample_camera_pair(all_cameras_and_2d_poses):
if len(all_cameras_and_2d_poses) > 1:
views = np.random.choice(
range(len(all_cameras_and_2d_poses)), 2, replace=False)
else:
views = (0, 0)
camera1, pose_2d1 = all_cameras_and_2d_poses[views[0]]
camera2, pose_2d2 = all_cameras_and_2d_poses[views[1]]
return camera1, camera2, pose_2d1, pose_2d2
class Human36MDataset(D3KeypointDataset):
    """Human3.6M dataset: pairs of 2D views of the same 3D pose, plus a
    negative 2D sample of a different pose, for embedding training."""
    def get_sequence(self, index, camera=None, stride=25):
        """Decode every `stride`-th frame of sequence `index` into records
        with normalized 3D offsets and a normalized 2D pose.

        If `camera` is None, one is chosen at random on the first decoded
        frame; note the chosen camera is then reused for the rest of the
        sequence (the local rebinds the parameter) — presumably intended
        to keep a consistent view, but confirm.
        """
        (person, action), frames = self.get(index)
        seq_poses = self.poses_3d[(person, action)]
        sequence = []
        for i, (frame_num, all_cameras_and_2d_poses) in enumerate(frames):
            if i % stride != 0:
                continue
            if camera is None:
                # Choose a random camera
                camera, pose2d = random.choice(all_cameras_and_2d_poses)
            else:
                # Use the requested camera; skip frames that lack it.
                for camera2, pose2d in all_cameras_and_2d_poses:
                    if camera2 == camera:
                        break
                else:
                    continue
            # Load 3d ground truth
            if frame_num >= len(seq_poses):
                print('Invalid frame: {} > {} (max_frame: {})'.format(
                    frame_num, len(seq_poses), frames[-1][0]))
                break
            _, rotation, abs_kp_offsets = seq_poses[frame_num]
            norm_kp_offsets, kp_dists = normalize_3d_offsets(abs_kp_offsets)
            sequence.append({
                'person': person,
                'action': action,
                'frame': frame_num,
                'rotation': rotation,
                'kp_offsets': norm_kp_offsets,
                'kp_offset_norms': kp_dists,
                'camera': camera,
                'pose': normalize_2d_skeleton(
                    pose2d, False, include_bone_features=self.embed_bones)
            })
        return sequence
    @staticmethod
    def _random_project_3d(raw_kp_offsets):
        """Map the h36m skeleton onto COCO joint order and project it to a
        synthetic 2D view via the module-level _random_project_3d."""
        skl = human36m.decode_skeleton_from_offsets(raw_kp_offsets)
        coco_xyz = np.stack([
            skl.nose,
            skl.nose, # No eyes in h36m
            skl.nose,
            skl.nose, # No ears in h36m
            skl.nose,
            skl.left_arm,
            skl.right_arm,
            skl.left_forearm,
            skl.right_forearm,
            skl.left_hand,
            skl.right_hand,
            skl.left_up_leg,
            skl.right_up_leg,
            skl.left_leg,
            skl.right_leg,
            skl.left_foot,
            skl.right_foot,
        ])
        return _random_project_3d(
            coco_xyz, elevation=CAMERA_AUG_ELEVATION_RANGE,
            roll=CAMERA_AUG_ROLL_RANGE)
    def _get_negative_sample(self, frames, seq_poses, norm_kp_offsets):
        """Sample a 2D pose from the same sequence whose 3D pose differs
        sufficiently from `norm_kp_offsets`; returns (pose2d_or_None, flip).
        """
        # Try to find a pose in the same sequence that differs the current one
        neg_flip = False
        for _ in range(MAX_NEG_SAMPLE_TRIES):
            neg_frame_num, neg_cameras_and_2d_poses = random.choice(frames)
            if neg_frame_num >= len(seq_poses):
                continue
            neg_raw_offsets = seq_poses[neg_frame_num][-1]
            neg_flip = self._should_flip()
            if is_good_3d_neg_sample(
                    normalize_3d_offsets(
                        flip_skeleton_offsets(neg_raw_offsets, human36m.XFLIP_ROWS)
                        if neg_flip else neg_raw_offsets
                    )[0],
                    norm_kp_offsets,
                    ignore=None if USE_EXTREMITIES else human36m.EXTREMITY_ROWS
            ):
                if self._should_project():
                    # Need to project with 3d before flipping
                    neg_pose2d = Human36MDataset._random_project_3d(neg_raw_offsets)
                else:
                    neg_pose2d = random.choice(neg_cameras_and_2d_poses)[1]
                break
        else:
            # All tries exhausted without a sufficiently different pose.
            neg_pose2d = None
            self._log_neg_sample_fail()
        return neg_pose2d, neg_flip
    def __getitem__(self, index):
        """Return a training sample: 3D features plus two 2D views of the
        same pose ('pose1'/'pose2') and a negative 2D pose ('pose_neg')."""
        self.sample_count += 1
        (person, action), frames = self.get(index)
        seq_poses = self.poses_3d[(person, action)]
        flip = self._should_flip()
        # Resample until the frame has a 3D ground-truth entry.
        while True:
            frame_num, all_cameras_and_2d_poses = random.choice(frames)
            if frame_num < len(seq_poses):
                break
        assert len(all_cameras_and_2d_poses) > 0
        # Load 3d ground truth
        _, rotation, raw_kp_offsets = seq_poses[frame_num]
        # Flip and normalize 3D
        abs_kp_offsets = raw_kp_offsets
        if flip:
            rotation = -rotation
            abs_kp_offsets = flip_skeleton_offsets(
                abs_kp_offsets, human36m.XFLIP_ROWS)
        # Sample two random cameras
        camera1, camera2, pose_2d1, pose_2d2 = _sample_camera_pair(
            all_cameras_and_2d_poses)
        # Replace with random projections if enabled
        if self._should_project():
            camera1 = ''
            pose_2d1 = Human36MDataset._random_project_3d(raw_kp_offsets)
        if self._should_project():
            camera2 = ''
            pose_2d2 = Human36MDataset._random_project_3d(raw_kp_offsets)
        # Negative sample
        neg_pose2d, neg_flip = self._get_negative_sample(
            frames, seq_poses, normalize_3d_offsets(abs_kp_offsets)[0])
        norm_pose1 = normalize_2d_skeleton(
            pose_2d1, flip, include_bone_features=self.embed_bones)
        ret = {
            'kp_features': get_3d_features(
                abs_kp_offsets, human36m, include_extremities=USE_EXTREMITIES,
                include_root_directions=USE_ROOT_DIRECTIONS),
            'pose1': norm_pose1,
            'pose2': normalize_2d_skeleton(
                pose_2d2, flip, include_bone_features=self.embed_bones),
            'pose_neg': torch.zeros_like(norm_pose1) if neg_pose2d is None
                else normalize_2d_skeleton(
                    neg_pose2d, neg_flip,
                    include_bone_features=self.embed_bones),
            'pose_neg_is_valid': int(neg_pose2d is not None)
        }
        if self.debug_info:
            ret.update({
                'person': person,
                'action': action,
                'frame': frame_num,
                'rotation': rotation,
                'camera1': camera1,
                'camera2': camera2,
                'is_flip': flip
            })
        return ret
    @staticmethod
    def load_default(pose_2d_dir, pose_3d_file, embed_bones, augment_camera):
        """Load 2D pose files and the 3D pickle, then split into train
        (all but S9/S11) and validation (S9/S11) datasets."""
        # These do not have 3d poses
        exclude_actions = {'_ALL', '_ALL 1'}
        pose_2d = defaultdict(lambda: defaultdict(list))
        for pose_2d_file in tqdm(
                os.listdir(pose_2d_dir), desc='Loading human3.6m'
        ):
            # File name encodes "<person>.<action>.<camera>.<rest>".
            person, action, camera, _ = pose_2d_file.split('.', 3)
            if action in exclude_actions:
                continue
            seq_pose = load_gz_json(os.path.join(pose_2d_dir, pose_2d_file))
            for frame, pose_data in seq_pose:
                if len(pose_data) > 0:
                    kp = np.array(pose_data[0][-1], dtype=np.float32)
                    pose_2d[(person, action)][frame].append((camera, kp))
        pose_2d = [(k, list(v.items())) for k, v in pose_2d.items()]
        pose_3d = load_pickle(pose_3d_file)
        all_people = {x[0][0] for x in pose_2d}
        val_people = {'S9', 'S11'}
        print('{} / {} people reserved for validation'.format(
            len(val_people), len(all_people)))
        assert val_people <= all_people
        train_2d = [x for x in pose_2d if x[0][0] not in val_people]
        train_2d.sort()
        train_dataset = Human36MDataset(
            train_2d, pose_3d, True, augment_camera, embed_bones, 20000)
        val_2d = [x for x in pose_2d if x[0][0] in val_people]
        val_2d.sort()
        val_dataset = Human36MDataset(
            val_2d, pose_3d, True, augment_camera, embed_bones, 2000)
        return train_dataset, val_dataset
# Common format for amass, 3dpeople, and nba2k
def _load_person_poses(pose_2d_dir, pose_2d_file):
    """Load one person's per-frame 2D poses from a gzipped-JSON file.

    Returns a non-empty list of (frame, [(camera, keypoints), ...])
    entries sorted by frame number, where `keypoints` is a float32
    numpy array taken from the last element of each camera's pose data.
    """
    person_pose = []
    for frame, all_camera_pose_data in sorted(
            load_gz_json(os.path.join(pose_2d_dir, pose_2d_file))
    ):
        frame_camera_pose = []
        for camera, pose_data in all_camera_pose_data:
            # Every camera entry must carry at least one detection.
            # (The previous `if len(pose_data) > 0` guard right after this
            # assert was unreachable dead code and has been removed.)
            assert len(pose_data) > 0
            kp = np.array(pose_data[-1], dtype=np.float32)
            frame_camera_pose.append((camera, kp))
        person_pose.append((frame, frame_camera_pose))
    assert len(person_pose) > 0
    return person_pose
class NBA2kDataset(D3KeypointDataset):
    """NBA2K mocap dataset.

    Only a single camera view is recorded per frame, so additional
    viewpoints (pose2 and all negative samples) are synthesized by
    randomly projecting the 3D skeleton.
    """

    CAMERA_AUG_PROB = 0.5
    CAMERA_AUG_ELEVATION_RANGE = (-np.pi / 6, np.pi / 6)

    @staticmethod
    def _random_project_3d(raw_kp_offsets):
        """Decode the NBA2K skeleton, re-order joints into COCO order,
        and project to 2D from a random virtual camera."""
        skl = nba2k.decode_skeleton_from_offsets(raw_kp_offsets)
        coco_xyz = np.stack([
            skl.nose,
            skl.leye,
            skl.reye,
            skl.lear,
            skl.rear,
            skl.lshoulder,
            skl.rshoulder,
            skl.lelbow,
            skl.relbow,
            skl.lwrist,
            skl.rwrist,
            skl.lhip,
            skl.rhip,
            skl.lknee,
            skl.rknee,
            skl.lankle,
            skl.rankle,
        ])
        # BUG FIX: class attributes are not in scope inside a method body,
        # so the bare name CAMERA_AUG_ELEVATION_RANGE previously resolved
        # to the module-level constant, silently ignoring the class-level
        # override defined above. Qualify it explicitly (same convention
        # as AmassDataset._idx). CAMERA_AUG_ROLL_RANGE has no class-level
        # override and intentionally stays the module constant.
        return _random_project_3d(
            coco_xyz,
            elevation=NBA2kDataset.CAMERA_AUG_ELEVATION_RANGE,
            roll=CAMERA_AUG_ROLL_RANGE)

    def get_sequence(self, index, camera=None, stride=4):
        """Return every `stride`-th frame of sequence `index` as dicts of
        normalized 2D pose + 3D ground truth.

        `camera` is accepted for interface parity with the sibling
        datasets but unused: NBA2K has one recorded view per frame.
        """
        person_key, frame_data = self.get(index)
        person_3d_poses = self.poses_3d[person_key]
        sequence = []
        for i, (frame_num, all_cameras_and_poses) in enumerate(frame_data):
            if i % stride != 0:
                continue
            # Load 3d ground truth
            _, rotation, abs_kp_offsets = person_3d_poses[frame_num]
            norm_kp_offsets, kp_dists = normalize_3d_offsets(abs_kp_offsets)
            sequence.append({
                'person': person_key[0],
                'action': '',
                'camera': '',
                'frame': frame_num,
                'rotation': rotation,
                'kp_offsets': norm_kp_offsets,
                'kp_offset_norms': kp_dists,
                # Single recorded camera: take the first (only) entry.
                'pose': normalize_2d_skeleton(
                    all_cameras_and_poses[0][-1], False,
                    include_bone_features=self.embed_bones)
            })
        return sequence

    def _get_negative_sample(self, frame_data, seq_poses, norm_kp_offsets):
        """Sample a pose from the same sequence whose 3D pose differs
        enough from `norm_kp_offsets`.

        Returns (neg_pose2d, neg_flip); neg_pose2d is None if no good
        negative was found within MAX_NEG_SAMPLE_TRIES. Negatives are
        always synthesized projections for NBA2K.
        """
        # Try to find a pose in the same sequence that differs the current one
        neg_flip = False
        for _ in range(MAX_NEG_SAMPLE_TRIES):
            neg_frame_num, _ = random.choice(frame_data)
            neg_raw_offsets = seq_poses[neg_frame_num][-1]
            neg_flip = self._should_flip()
            if is_good_3d_neg_sample(
                    normalize_3d_offsets(
                        flip_skeleton_offsets(neg_raw_offsets, nba2k.XFLIP_ROWS)
                        if neg_flip else neg_raw_offsets
                    )[0],
                    norm_kp_offsets,
                    ignore=None if USE_EXTREMITIES else nba2k.EXTREMITY_ROWS
            ):
                neg_pose2d = NBA2kDataset._random_project_3d(neg_raw_offsets)
                break
        else:
            neg_pose2d = None
            self._log_neg_sample_fail()
        return neg_pose2d, neg_flip

    def __getitem__(self, index):
        """Return one training sample: 3D features, the normalized 2D
        pose, and (when camera augmentation is on) a second synthesized
        view plus a negative pose."""
        self.sample_count += 1
        person_key, frame_data = self.get(index)
        person_3d_poses = self.poses_3d[person_key]
        frame_num, all_cameras_and_poses = random.choice(frame_data)
        pose_2d = all_cameras_and_poses[0][-1]
        flip = self._should_flip()
        # Load 3d ground truth
        _, rotation, raw_kp_offsets = person_3d_poses[frame_num]
        # Flip and normalize 3D
        abs_kp_offsets = raw_kp_offsets
        if flip:
            rotation = -rotation
            abs_kp_offsets = flip_skeleton_offsets(
                abs_kp_offsets, nba2k.XFLIP_ROWS)
        if self._should_project():
            pose_2d = NBA2kDataset._random_project_3d(raw_kp_offsets)
        ret = {'kp_features': get_3d_features(
                   abs_kp_offsets, nba2k, include_extremities=USE_EXTREMITIES,
                   include_root_directions=USE_ROOT_DIRECTIONS),
               'pose1': normalize_2d_skeleton(
                   pose_2d, flip, include_bone_features=self.embed_bones)}
        if self.augment_camera:
            pose_2d2 = NBA2kDataset._random_project_3d(raw_kp_offsets)
            ret['pose2'] = normalize_2d_skeleton(
                pose_2d2, flip, include_bone_features=self.embed_bones)
            neg_pose2d, neg_flip = self._get_negative_sample(
                frame_data, person_3d_poses,
                normalize_3d_offsets(abs_kp_offsets)[0])
            # BUG FIX: this previously used torch.zeros_like(pose_2d), the
            # raw (un-normalized, possibly numpy) 2D pose. Every sibling
            # dataset zeroes the *normalized* pose so the dummy negative
            # matches 'pose1' in shape and tensor type for batching.
            ret['pose_neg'] = (
                torch.zeros_like(ret['pose1']) if neg_pose2d is None else
                normalize_2d_skeleton(neg_pose2d, neg_flip,
                                      include_bone_features=self.embed_bones))
            ret['pose_neg_is_valid'] = int(neg_pose2d is not None)
        if self.debug_info:
            ret.update({
                'person': person_key[0],
                'action': '',
                'frame': frame_num,
                'rotation': rotation,
                'camera1': '', 'camera2': '',
                'is_flip': flip
            })
        return ret

    @staticmethod
    def load_default(pose_2d_dir, pose_3d_file, embed_bones):
        """Build the default NBA2K train/val datasets, splitting on a
        fixed list of held-out player names."""
        pose_3d = load_pickle(pose_3d_file)
        pose_2d = []
        for pose_2d_file in tqdm(os.listdir(pose_2d_dir), desc='Loading NBA2K'):
            person = pose_2d_file.split('.', 1)[0]
            pose_2d.append((
                (person,), _load_person_poses(pose_2d_dir, pose_2d_file)))
        all_people = {x[0][0] for x in pose_2d}
        val_people = {'alfred', 'allen', 'barney', 'bradley'}
        print('{} / {} people reserved for validation'.format(
            len(val_people), len(all_people)))
        assert val_people <= all_people
        train_2d = [x for x in pose_2d if x[0][0] not in val_people]
        train_2d.sort()
        train_dataset = NBA2kDataset(
            train_2d, pose_3d, True, True, embed_bones, 5000)
        val_2d = [x for x in pose_2d if x[0][0] in val_people]
        val_2d.sort()
        val_dataset = NBA2kDataset(
            val_2d, pose_3d, True, True, embed_bones, 500)
        return train_dataset, val_dataset
class People3dDataset(D3KeypointDataset):
    """3DPeople dataset: multi-view 2D detections paired with 3D poses.

    NOTE: 3D poses are indexed by ``frame_num - 1`` — the 2D frame
    numbers appear to be 1-based here, unlike NBA2K.
    """

    def get_sequence(self, index, camera=None, stride=2):
        """Return every `stride`-th frame of sequence `index` with its
        normalized 2D pose and 3D ground truth.

        If `camera` is None a camera is chosen at random on the first
        kept frame; NOTE(review): `camera` is then no longer None, so
        subsequent frames stick to that camera — presumably intentional
        (consistent viewpoint across the sequence), confirm.
        """
        (person, action), frame_data = self.get(index)
        seq_poses = self.poses_3d[(person, action)]
        sequence = []
        for i, (frame_num, all_cameras_and_2d_poses) in enumerate(frame_data):
            if i % stride != 0:
                continue
            if camera is None:
                # Choose a random camera
                camera, pose2d = random.choice(all_cameras_and_2d_poses)
            else:
                for camera2, pose2d in all_cameras_and_2d_poses:
                    if camera2 == camera:
                        break
                else:
                    # Requested camera is missing for this frame: skip it.
                    continue
            # Load 3d ground truth
            _, rotation, abs_kp_offsets = seq_poses[frame_num - 1]
            norm_kp_offsets, kp_dists = normalize_3d_offsets(abs_kp_offsets)
            sequence.append({
                'person': person,
                'action': action,
                'frame': frame_num,
                'rotation': rotation,
                'kp_offsets': norm_kp_offsets,
                'kp_offset_norms': kp_dists,
                'camera': camera,
                'pose': normalize_2d_skeleton(
                    pose2d, False, include_bone_features=self.embed_bones)
            })
        return sequence

    @staticmethod
    def _random_project_3d(raw_kp_offsets):
        """Decode the 3DPeople skeleton, approximate COCO joints (eyes
        substitute for the missing ears; the nose is the head/eye
        centroid), and project to 2D from a random virtual camera.

        Uses the module-level CAMERA_AUG_* constants (no class-level
        overrides exist on this class).
        """
        skl = people3d.decode_skeleton_from_offsets(raw_kp_offsets)
        coco_xyz = np.stack([
            (skl.head + skl.left_eye + skl.right_eye) / 3,
            skl.left_eye,
            skl.right_eye,
            skl.left_eye,  # No ears in 3dpeople
            skl.right_eye,
            skl.left_arm,
            skl.right_arm,
            skl.left_forearm,
            skl.right_forearm,
            skl.left_hand,
            skl.right_hand,
            skl.left_up_leg,
            skl.right_up_leg,
            skl.left_leg,
            skl.right_leg,
            skl.left_foot,
            skl.right_foot,
        ])
        return _random_project_3d(
            coco_xyz, elevation=CAMERA_AUG_ELEVATION_RANGE,
            roll=CAMERA_AUG_ROLL_RANGE)

    def _get_negative_sample(self, frame_data, seq_poses, norm_kp_offsets):
        """Sample a pose from the same sequence whose (optionally
        flipped) 3D pose differs enough from `norm_kp_offsets`.

        Returns (neg_pose2d, neg_flip); neg_pose2d is None when no good
        negative is found within MAX_NEG_SAMPLE_TRIES. The 2D negative is
        either a random projection or a recorded camera view.
        """
        # Try to find a pose in the same sequence that differs the current one
        neg_flip = False
        for _ in range(MAX_NEG_SAMPLE_TRIES):
            neg_frame_num, neg_cameras_and_2d_poses = random.choice(frame_data)
            neg_raw_offsets = seq_poses[neg_frame_num - 1][-1]
            neg_flip = self._should_flip()
            if is_good_3d_neg_sample(
                    normalize_3d_offsets(
                        flip_skeleton_offsets(neg_raw_offsets, people3d.XFLIP_ROWS)
                        if neg_flip else neg_raw_offsets
                    )[0],
                    norm_kp_offsets,
                    ignore=None if USE_EXTREMITIES else people3d.EXTREMITY_ROWS
            ):
                if self._should_project():
                    neg_pose2d = People3dDataset._random_project_3d(neg_raw_offsets)
                else:
                    neg_pose2d = random.choice(neg_cameras_and_2d_poses)[1]
                break
        else:
            neg_pose2d = None
            self._log_neg_sample_fail()
        return neg_pose2d, neg_flip

    def __getitem__(self, index):
        """Return one training sample: 3D features, two 2D views of the
        same pose (recorded or randomly projected), and a negative pose
        (zeros + validity flag when none was found)."""
        self.sample_count += 1
        (person, action), frame_data = self.get(index)
        seq_poses = self.poses_3d[(person, action)]
        flip = self._should_flip()
        frame_num, all_cameras_and_2d_poses = random.choice(frame_data)
        assert len(all_cameras_and_2d_poses) > 0
        # Load 3d ground truth
        _, rotation, raw_kp_offsets = seq_poses[frame_num - 1]
        # Flip and normalize 3D
        abs_kp_offsets = raw_kp_offsets
        if flip:
            rotation = -rotation
            abs_kp_offsets = flip_skeleton_offsets(
                abs_kp_offsets, people3d.XFLIP_ROWS)
        # Sample two random cameras
        camera1, camera2, pose_2d1, pose_2d2 = _sample_camera_pair(
            all_cameras_and_2d_poses)
        # Replace with projections if needed
        if self._should_project():
            camera1 = ''
            pose_2d1 = People3dDataset._random_project_3d(raw_kp_offsets)
        if self._should_project():
            camera2 = ''
            pose_2d2 = People3dDataset._random_project_3d(raw_kp_offsets)
        # Get negative sample
        neg_pose2d, neg_flip = self._get_negative_sample(
            frame_data, seq_poses, normalize_3d_offsets(abs_kp_offsets)[0])
        norm_pose1 = normalize_2d_skeleton(
            pose_2d1, flip, include_bone_features=self.embed_bones)
        ret = {
            'kp_features': get_3d_features(
                abs_kp_offsets, people3d, include_extremities=USE_EXTREMITIES,
                include_root_directions=USE_ROOT_DIRECTIONS),
            'pose1': norm_pose1,
            'pose2': normalize_2d_skeleton(
                pose_2d2, flip, include_bone_features=self.embed_bones),
            # Zero tensor keeps the batch shape consistent when no valid
            # negative exists; consumers must check 'pose_neg_is_valid'.
            'pose_neg': torch.zeros_like(norm_pose1) if neg_pose2d is None
            else normalize_2d_skeleton(
                neg_pose2d, neg_flip,
                include_bone_features=self.embed_bones),
            'pose_neg_is_valid': int(neg_pose2d is not None)
        }
        if self.debug_info:
            ret.update({
                'person': person,
                'action': action,
                'frame': frame_num,
                'rotation': rotation,
                'camera1': camera1,
                'camera2': camera2,
                'is_flip': flip
            })
        return ret

    @staticmethod
    def load_default(pose_2d_dir, pose_3d_file, embed_bones, augment_camera):
        """Build the default 3DPeople train/val datasets; man01-04 and
        woman01-04 are held out for validation."""
        pose_2d = []
        for pose_2d_file in tqdm(
                os.listdir(pose_2d_dir), desc='Loading 3D people'
        ):
            # File names look like '<person>__<action>.<ext>'.
            person, action = pose_2d_file.split('.', 1)[0].split('__', 1)
            pose_2d.append(((person, action),
                            _load_person_poses(pose_2d_dir, pose_2d_file)))
        pose_3d = load_pickle(pose_3d_file)
        all_people = {x[0][0] for x in pose_2d}
        val_people = set()
        for s in ['man', 'woman']:
            val_people.update(['{}{:02d}'.format(s, i + 1) for i in range(4)])
        print('{} / {} people reserved for validation'.format(
            len(val_people), len(all_people)))
        assert val_people <= all_people
        train_2d = [x for x in pose_2d if x[0][0] not in val_people]
        train_2d.sort()
        train_dataset = People3dDataset(
            train_2d, pose_3d, True, augment_camera, embed_bones, 5000)
        val_2d = [x for x in pose_2d if x[0][0] in val_people]
        val_2d.sort()
        val_dataset = People3dDataset(
            val_2d, pose_3d, True, augment_camera, embed_bones, 500)
        return train_dataset, val_dataset
class AmassDataset(D3KeypointDataset):
    """AMASS mocap dataset (an aggregation of many mocap sub-datasets).

    3D poses are stored at a coarser temporal resolution than the 2D
    detections: a 2D frame number maps to its 3D pose via `_idx`
    (integer division by `idx_stride`).
    """

    CAMERA_AUG_ELEVATION_RANGE = (-np.pi / 6, np.pi / 6)

    # 3D poses are stored once every `idx_stride` 2D frames.
    idx_stride = 25

    # Relative sampling weight per sub-dataset: weight >= 1 duplicates
    # each sequence that many times, fractional weight keeps only every
    # (1/weight)-th sequence (see load_default).
    sample_weights = {
        'ACCAD': 1,
        'BMLhandball': 1,
        'BMLmovi': 1,
        'BMLrub': 1,
        'CMU': 1,
        'DFaust67': 1,
        'EKUT': 1,
        'EyesJapanDataset': 1,
        'HumanEva': 1,
        'KIT': 1,
        'MPIHDM05': 10,
        'MPILimits': 10,
        'MPImosh': 10,
        'SFU': 1,
        'SSMsynced': 1,
        'TCDhandMocap': 1,
        'TotalCapture': 1,
        'Transitionsmocap': 1
    }

    @staticmethod
    def _idx(frame_num):
        """Map a 2D frame number to its 3D pose index."""
        return frame_num // AmassDataset.idx_stride

    def get_sequence(self, index, camera=None, stride=25):
        """Return every `stride`-th frame of sequence `index` with its
        normalized 2D pose and 3D ground truth.

        NOTE(review): the `camera` parameter is overwritten by a random
        choice on each kept frame — it is effectively unused here.
        """
        (dataset, action), frame_data = self.get(index)
        seq_poses = self.poses_3d[(dataset, action)]
        sequence = []
        for i, (frame_num, all_cameras_and_2d_poses) in enumerate(frame_data):
            if i % stride != 0:
                continue
            camera, pose2d = random.choice(all_cameras_and_2d_poses)
            # Load 3d ground truth
            _, rotation, abs_kp_offsets = seq_poses[AmassDataset._idx(frame_num)]
            norm_kp_offsets, kp_dists = normalize_3d_offsets(abs_kp_offsets)
            sequence.append({
                'person': dataset,
                'action': action,
                'frame': frame_num,
                'rotation': rotation,
                'kp_offsets': norm_kp_offsets,
                'kp_offset_norms': kp_dists,
                'camera': camera,
                'pose': normalize_2d_skeleton(
                    pose2d, False, include_bone_features=self.embed_bones)
            })
        return sequence

    @staticmethod
    def _random_project_3d(raw_kp_offsets):
        """Decode the AMASS skeleton, approximate COCO joints (AMASS has
        no eyes/ears, so the head midpoint substitutes for all five face
        joints), and project to 2D from a random virtual camera."""
        skl = amass.decode_skeleton_from_offsets(raw_kp_offsets)
        nose = (skl.head_top + skl.head) / 2
        coco_xyz = np.stack([
            nose,
            nose,  # No eyes in amass
            nose,
            nose,  # No ears in amass
            nose,
            skl.l_shoulder,
            skl.r_shoulder,
            skl.l_elbow,
            skl.r_elbow,
            skl.l_wrist,
            skl.r_wrist,
            skl.l_hip,
            skl.r_hip,
            skl.l_knee,
            skl.r_knee,
            skl.l_ankle,
            skl.r_ankle,
        ])
        # BUG FIX: class attributes are not in scope inside a method body,
        # so the bare name CAMERA_AUG_ELEVATION_RANGE previously resolved
        # to the module-level constant, silently ignoring the class-level
        # override defined above. Qualify it explicitly (same convention
        # as _idx). CAMERA_AUG_ROLL_RANGE has no class-level override and
        # intentionally stays the module constant.
        return _random_project_3d(
            coco_xyz,
            elevation=AmassDataset.CAMERA_AUG_ELEVATION_RANGE,
            roll=CAMERA_AUG_ROLL_RANGE)

    def _get_negative_sample(self, frame_data, seq_poses, norm_kp_offsets):
        """Sample a pose from the same sequence whose (optionally
        flipped) 3D pose differs enough from `norm_kp_offsets`.

        Returns (neg_pose2d, neg_flip); neg_pose2d is None when no good
        negative is found within MAX_NEG_SAMPLE_TRIES.
        """
        # Try to find a pose in the same sequence that differs the current one
        neg_flip = False
        for _ in range(MAX_NEG_SAMPLE_TRIES):
            neg_frame_num, neg_cameras_and_2d_poses = random.choice(frame_data)
            neg_raw_offsets = seq_poses[AmassDataset._idx(neg_frame_num)][-1]
            neg_flip = self._should_flip()
            if is_good_3d_neg_sample(
                    normalize_3d_offsets(
                        flip_skeleton_offsets(neg_raw_offsets, amass.XFLIP_ROWS)
                        if neg_flip else neg_raw_offsets
                    )[0],
                    norm_kp_offsets,
                    ignore=None if USE_EXTREMITIES else amass.EXTREMITY_ROWS
            ):
                if self._should_project():
                    neg_pose2d = AmassDataset._random_project_3d(neg_raw_offsets)
                else:
                    neg_pose2d = random.choice(neg_cameras_and_2d_poses)[1]
                break
        else:
            neg_pose2d = None
            self._log_neg_sample_fail()
        return neg_pose2d, neg_flip

    def __getitem__(self, index):
        """Return one training sample: 3D features, two 2D views of the
        same pose (recorded or randomly projected), and a negative pose
        (zeros + validity flag when none was found)."""
        self.sample_count += 1
        (dataset, action), frame_data = self.get(index)
        seq_poses = self.poses_3d[(dataset, action)]
        flip = self._should_flip()
        frame_num, all_cameras_and_2d_poses = random.choice(frame_data)
        assert len(all_cameras_and_2d_poses) > 0
        # Load 3d ground truth
        _, rotation, raw_kp_offsets = seq_poses[AmassDataset._idx(frame_num)]
        # Flip and normalize 3D
        abs_kp_offsets = raw_kp_offsets
        if flip:
            rotation = -rotation
            abs_kp_offsets = flip_skeleton_offsets(
                abs_kp_offsets, amass.XFLIP_ROWS)
        # Sample two random cameras
        camera1, camera2, pose_2d1, pose_2d2 = _sample_camera_pair(
            all_cameras_and_2d_poses)
        # Replace with projections if needed
        if self._should_project():
            camera1 = ''
            pose_2d1 = AmassDataset._random_project_3d(raw_kp_offsets)
        if self._should_project():
            camera2 = ''
            pose_2d2 = AmassDataset._random_project_3d(raw_kp_offsets)
        # Get negative sample
        neg_pose2d, neg_flip = self._get_negative_sample(
            frame_data, seq_poses, normalize_3d_offsets(abs_kp_offsets)[0])
        norm_pose1 = normalize_2d_skeleton(
            pose_2d1, flip, include_bone_features=self.embed_bones)
        ret = {
            'kp_features': get_3d_features(
                abs_kp_offsets, amass, include_extremities=USE_EXTREMITIES,
                include_root_directions=USE_ROOT_DIRECTIONS),
            'pose1': norm_pose1,
            'pose2': normalize_2d_skeleton(
                pose_2d2, flip, include_bone_features=self.embed_bones),
            # Zero tensor keeps the batch shape consistent when no valid
            # negative exists; consumers must check 'pose_neg_is_valid'.
            'pose_neg': torch.zeros_like(norm_pose1) if neg_pose2d is None
            else normalize_2d_skeleton(
                neg_pose2d, neg_flip,
                include_bone_features=self.embed_bones),
            'pose_neg_is_valid': int(neg_pose2d is not None)
        }
        if self.debug_info:
            ret.update({
                'person': dataset,
                'action': action,
                'frame': frame_num,
                'rotation': rotation,
                'camera1': camera1,
                'camera2': camera2,
                'is_flip': flip
            })
        return ret

    @staticmethod
    def load_default(pose_2d_dir, pose_3d_file, embed_bones, augment_camera):
        """Build the default AMASS train/val datasets, re-weighting
        sub-datasets per `sample_weights` and holding out the
        EyesJapanDataset sub-dataset for validation."""
        pose_2d = []
        for pose_2d_file in tqdm(
                os.listdir(pose_2d_dir), desc='Loading AMASS'
        ):
            dataset, action = pose_2d_file.split('.', 1)[0].split('_', 1)
            pose_2d.append(((dataset, action),
                            _load_person_poses(pose_2d_dir, pose_2d_file)))
        pose_3d = load_pickle(pose_3d_file)
        # Stride over subsampled datasets: weight >= 1 duplicates a
        # sequence that many times; fractional weight keeps only every
        # (1/weight)-th sequence of that sub-dataset.
        dataset_counter = Counter()
        all_datasets = set()
        all_sequences = []
        for item in pose_2d:
            dataset = item[0][0]
            dataset_weight = AmassDataset.sample_weights[dataset]
            if dataset_weight >= 1:
                for _ in range(round(dataset_weight)):
                    all_sequences.append(item)
            else:
                if dataset_counter[dataset] % round(1 / dataset_weight) == 0:
                    all_sequences.append(item)
                dataset_counter[dataset] += 1
            all_datasets.add(dataset)
        val_datasets = {'EyesJapanDataset'}
        print('{} / {} datasets reserved for validation'.format(
            len(val_datasets), len(all_datasets)))
        assert val_datasets <= all_datasets
        # BUG FIX: the weighted `all_sequences` list was computed above
        # but never used -- train/val were split from the unweighted
        # `pose_2d`, making `sample_weights` a no-op. Train now splits
        # from `all_sequences` so the weights take effect; validation
        # stays unweighted (its only sub-dataset has weight 1 anyway).
        train_2d = [x for x in all_sequences if x[0][0] not in val_datasets]
        train_2d.sort()
        train_dataset = AmassDataset(
            train_2d, pose_3d, True, augment_camera, embed_bones, 20000)
        val_2d = [x for x in pose_2d if x[0][0] in val_datasets]
        val_2d.sort()
        val_dataset = AmassDataset(
            val_2d, pose_3d, True, augment_camera, embed_bones, 2000)
        return train_dataset, val_dataset
class Pairwise_People3dDataset(Dataset):
    """Pairs of 2D skeletons of two *different* people performing the
    same 3DPeople action, always labeled as the same pose."""

    def __init__(self, pose_2d, scale, embed_bones, random_hflip=True):
        """pose_2d: list of ((person, action), [(frame, cameras), ...]).

        `scale` multiplies the number of actions to give the epoch
        length; `random_hflip` enables random horizontal flipping.
        """
        super().__init__()
        self.random_hflip = random_hflip
        self.embed_bones = embed_bones
        # (person, action) -> (ordered frame list, frame -> camera poses)
        self.point_dict = {}
        for seq_key, frames in pose_2d:
            self.point_dict[tuple(seq_key)] = (
                [frame for frame, _ in frames], dict(frames))
        self.people = list({key[0] for key in self.point_dict})
        self.actions = list({key[1] for key in self.point_dict})
        self.scale = scale

    def __len__(self):
        return self.scale * len(self.actions)

    def _should_flip(self):
        """Flip with probability 1/2 when enabled."""
        if not self.random_hflip:
            return False
        return random.getrandbits(1) > 0

    def __getitem__(self, index):
        action = self.actions[index % len(self.actions)]
        # Two distinct people performing the same action.
        person1, person2 = np.random.choice(
            self.people, 2, replace=False).tolist()
        frames1, frame_to_cameras1 = self.point_dict[(person1, action)]
        _, frame_to_cameras2 = self.point_dict[(person2, action)]
        # Retry until a frame exists for both people (bounded attempts).
        for attempt in range(1000):
            if attempt == 10:
                print('Why is this taking so many tries?',
                      person1, person2, action)
            frame_num = random.choice(frames1)
            all_cameras2 = frame_to_cameras2.get(frame_num)
            if all_cameras2 is None:
                # No camera is available for person2; try again
                continue
            # Sample one camera per person.
            pose_2d1 = random.choice(frame_to_cameras1[frame_num])[1]
            pose_2d2 = random.choice(all_cameras2)[1]
            break
        else:
            raise RuntimeError('This dataset is really borked...')
        flip = self._should_flip()
        return {
            'pose1': normalize_2d_skeleton(
                pose_2d1, flip, include_bone_features=self.embed_bones),
            'pose2': normalize_2d_skeleton(
                pose_2d2, flip, include_bone_features=self.embed_bones),
            'is_same': True, 'is_flip': flip
        }

    @staticmethod
    def load_default(pose_2d_dir, scale, embed_bones):
        """Build the default pairwise train/val datasets; man01-04 and
        woman01-04 are held out for validation."""
        pose_2d = []
        for fname in tqdm(
                os.listdir(pose_2d_dir), desc='Loading 3D people (Pairs)'
        ):
            person, action = fname.split('.', 1)[0].split('__', 1)
            pose_2d.append(((person, action),
                            _load_person_poses(pose_2d_dir, fname)))
        all_people = {key[0] for key, _ in pose_2d}
        val_people = {'{}{:02d}'.format(prefix, i + 1)
                      for prefix in ['man', 'woman'] for i in range(4)}
        print('{} / {} people reserved for validation'.format(
            len(val_people), len(all_people)))
        assert val_people <= all_people
        train_2d = sorted(x for x in pose_2d if x[0][0] not in val_people)
        val_2d = sorted(x for x in pose_2d if x[0][0] in val_people)
        return (Pairwise_People3dDataset(train_2d, scale, embed_bones),
                Pairwise_People3dDataset(val_2d, int(scale * 0.2), embed_bones))
d1d5ad5909c1f7aeff2c57a2a2ce89566588da27 | 2,686 | py | Python | platformio/commands/settings.py | awachtler/platformio-core | bf9552bd562bc15fe5912631e6da378b2088f7db | [
"Apache-2.0"
] | null | null | null | platformio/commands/settings.py | awachtler/platformio-core | bf9552bd562bc15fe5912631e6da378b2088f7db | [
"Apache-2.0"
] | null | null | null | platformio/commands/settings.py | awachtler/platformio-core | bf9552bd562bc15fe5912631e6da378b2088f7db | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from platformio import app
@click.group(short_help="Manage PlatformIO settings")
def cli():
pass
@cli.command("get", short_help="Get existing setting/-s")
@click.argument("name", required=False)
def settings_get(name):
list_tpl = "{name:<40} {value:<35} {description}"
terminal_width, _ = click.get_terminal_size()
click.echo(
list_tpl.format(name=click.style("Name", fg="cyan"),
value=(click.style("Value", fg="green") +
click.style(" [Default]", fg="yellow")),
description="Description"))
click.echo("-" * terminal_width)
for _name, _data in sorted(app.DEFAULT_SETTINGS.items()):
if name and name != _name:
continue
_value = app.get_setting(_name)
_value_str = str(_value)
if isinstance(_value, bool):
_value_str = "Yes" if _value else "No"
_value_str = click.style(_value_str, fg="green")
if _value != _data['value']:
_defvalue_str = str(_data['value'])
if isinstance(_data['value'], bool):
_defvalue_str = "Yes" if _data['value'] else "No"
_value_str += click.style(" [%s]" % _defvalue_str, fg="yellow")
else:
_value_str += click.style(" ", fg="yellow")
click.echo(
list_tpl.format(name=click.style(_name, fg="cyan"),
value=_value_str,
description=_data['description']))
@cli.command("set", short_help="Set new value for the setting")
@click.argument("name")
@click.argument("value")
@click.pass_context
def settings_set(ctx, name, value):
app.set_setting(name, value)
click.secho("The new value for the setting has been set!", fg="green")
ctx.invoke(settings_get, name=name)
@cli.command("reset", short_help="Reset settings to default")
@click.pass_context
def settings_reset(ctx):
app.reset_settings()
click.secho("The settings have been reseted!", fg="green")
ctx.invoke(settings_get)
| 34 | 75 | 0.641102 |
02bcdbac028e102733e25e78af33b137ba4a798c | 546 | py | Python | blast_api/urls.py | carrierx/carrierx-blast-api | 86e71bdddce272003805bb7a37ad9ad7f8de323a | [
"MIT"
] | null | null | null | blast_api/urls.py | carrierx/carrierx-blast-api | 86e71bdddce272003805bb7a37ad9ad7f8de323a | [
"MIT"
] | 1 | 2021-06-10T22:32:51.000Z | 2021-06-10T22:32:51.000Z | blast_api/urls.py | carrierx/carrierx-blast-api | 86e71bdddce272003805bb7a37ad9ad7f8de323a | [
"MIT"
] | null | null | null | from django.urls import include, path
from . import views
urlpatterns = [
path('shouts/<int:shout_id>;cancel', views.cancel_shout, name='cancel_shout'),
path('shouts/<int:shout_id>', views.get_shout, name='get_shout'),
path('shouts', views.shouts, name='shouts'),
path('calls', views.calls, name='calls'),
path('dnc/<str:userdata>', views.dnc, name='dnc'),
path('dnc/<str:userdata>/<str:number>', views.dnc_delete, name='delete_dnc'),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
]
| 39 | 82 | 0.681319 |
f6568e8aea40f0f4fe37d0c274b0d3b0e5330adb | 847 | py | Python | tests/test_phpstorm.py | henriklynggaard/ansible-role-phpstorm | 3f5eb9fa2c3173b09f44733336503348ae131958 | [
"MIT"
] | null | null | null | tests/test_phpstorm.py | henriklynggaard/ansible-role-phpstorm | 3f5eb9fa2c3173b09f44733336503348ae131958 | [
"MIT"
] | null | null | null | tests/test_phpstorm.py | henriklynggaard/ansible-role-phpstorm | 3f5eb9fa2c3173b09f44733336503348ae131958 | [
"MIT"
] | null | null | null | import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
'.molecule/ansible_inventory').get_hosts('all')
desktop_file_location = "/root/.local/share/applications/phpstorm-2017.2.desktop"
def test_desktop_file_exists(File):
f = File(desktop_file_location)
assert f.exists
assert f.is_file
def test_desktop_file_contains_fullpath(File):
f = File(desktop_file_location)
assert f.contains("/root/Tools/phpstorm-2017.2/bin/phpstorm.png")
assert f.contains("/root/Tools/phpstorm-2017.2/bin/phpstorm.sh")
def test_desktop_file_contains_right_name(File):
f = File(desktop_file_location)
assert f.contains("PhpStorm 2017.2")
def test_start_file_exists(File):
f = File('/root/Tools/phpstorm-2017.2/bin/phpstorm.sh')
assert f.exists
assert f.is_file
| 24.2 | 81 | 0.756789 |
057ccd1dad5757f92d0626f2d2e329241e2f3f92 | 1,415 | py | Python | var/spack/repos/builtin/packages/bismark/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/bismark/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | var/spack/repos/builtin/packages/bismark/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bismark(Package):
"""A tool to map bisulfite converted sequence reads and determine cytosine
methylation states"""
homepage = "https://www.bioinformatics.babraham.ac.uk/projects/bismark"
url = "https://github.com/FelixKrueger/Bismark/archive/0.19.0.tar.gz"
version('0.19.0', sha256='91707737f96a0574956a282b635abad7560e7d90bee188a67a7807b2470deae2')
version('0.18.2', sha256='83391c5b5af33047178e7774ac25f5a69ce9315c13ae02f016baf7c50b73e702')
depends_on('bowtie2', type='run')
depends_on('perl', type='run')
depends_on('samtools', type='run')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('bam2nuc', prefix.bin)
install('bismark', prefix.bin)
install('bismark_genome_preparation', prefix.bin)
install('bismark_methylation_extractor', prefix.bin)
install('bismark2bedGraph', prefix.bin)
install('bismark2report', prefix.bin)
install('bismark2summary', prefix.bin)
install('coverage2cytosine', prefix.bin)
install('deduplicate_bismark', prefix.bin)
install('filter_non_conversion', prefix.bin)
install('NOMe_filtering', prefix.bin)
| 39.305556 | 96 | 0.711661 |
516862444c68745f7cf6b36e806fa0e01bced2d8 | 247 | py | Python | test_url.py | thedognexttothetrashcan/taobao | c796ec1f61a5443406686f65badc1df6aaf7b711 | [
"Apache-2.0"
] | null | null | null | test_url.py | thedognexttothetrashcan/taobao | c796ec1f61a5443406686f65badc1df6aaf7b711 | [
"Apache-2.0"
] | 1 | 2020-12-05T15:32:37.000Z | 2020-12-05T15:32:37.000Z | test_url.py | xwh-p/taobao | c796ec1f61a5443406686f65badc1df6aaf7b711 | [
"Apache-2.0"
] | null | null | null | ul = 'https://list.tmall.com/search_product.htm?spm=a220m.1000858.0.0.785d7b19nELJh6&s={}&q=iiphone+x&sort=s&style=g&from=.list.pc_1_searchbutton&smAreaId=110100&type=pc#J_Filter'
for i in range(120, 900, 60):
url = ul.format(i)
print(url) | 61.75 | 179 | 0.728745 |
1b000be70ffbe52fa86011f6b827dc7e3756feba | 1,124 | py | Python | sigmapiweb/apps/PartyListV2/migrations/0014_alter_admin_form.py | Jacobvs/sigmapi-web | ca8d5a5294385fe5f4634c483a1278df904e2f85 | [
"MIT"
] | null | null | null | sigmapiweb/apps/PartyListV2/migrations/0014_alter_admin_form.py | Jacobvs/sigmapi-web | ca8d5a5294385fe5f4634c483a1278df904e2f85 | [
"MIT"
] | null | null | null | sigmapiweb/apps/PartyListV2/migrations/0014_alter_admin_form.py | Jacobvs/sigmapi-web | ca8d5a5294385fe5f4634c483a1278df904e2f85 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-18 13:51
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("PartyListV2", "0013_partycountrecord"),
]
operations = [
migrations.AlterField(
model_name="partyguest",
name="_cached_json",
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name="partyguest",
name="invite_used",
field=models.ForeignKey(
blank=True,
default=None,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="invites_used_for",
to=settings.AUTH_USER_MODEL,
),
),
migrations.AlterField(
model_name="partyguest",
name="time_first_signed_in",
field=models.DateTimeField(blank=True, null=True),
),
]
| 28.1 | 62 | 0.580071 |
20a707f7b22ebfedd9251ac2aa52cf45a800f382 | 2,456 | py | Python | autotest/gdrivers/pcraster.py | ajolma/gdal | 19d847c8519919fcd1e7e7247644d28771034317 | [
"MIT"
] | 1 | 2018-12-19T14:08:20.000Z | 2018-12-19T14:08:20.000Z | autotest/gdrivers/pcraster.py | ajolma/gdal | 19d847c8519919fcd1e7e7247644d28771034317 | [
"MIT"
] | 3 | 2019-02-27T00:43:06.000Z | 2019-06-28T21:57:10.000Z | autotest/gdrivers/pcraster.py | ajolma/gdal | 19d847c8519919fcd1e7e7247644d28771034317 | [
"MIT"
] | 1 | 2019-11-01T15:17:09.000Z | 2019-11-01T15:17:09.000Z | #!/usr/bin/env pytest
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test PCRaster driver support.
# Author: Frank Warmerdam <warmerdam@pobox.com>
#
###############################################################################
# Copyright (c) 2004, Frank Warmerdam <warmerdam@pobox.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
from osgeo import gdal
import gdaltest
import pytest
###############################################################################
# Perform simple read test.
def test_pcraster_1():
gdaltest.pcraster_drv = gdal.GetDriverByName('PCRaster')
if gdaltest.pcraster_drv is None:
pytest.skip()
tst = gdaltest.GDALTest('PCRaster', 'ldd.map', 1, 4528)
return tst.testOpen()
###############################################################################
# Verify some auxiliary data.
def test_pcraster_2():
if gdaltest.pcraster_drv is None:
pytest.skip()
ds = gdal.Open('data/ldd.map')
gt = ds.GetGeoTransform()
assert gt[0] == 182140.0 and gt[1] == 10 and gt[2] == 0 and gt[3] == 327880.0 and gt[4] == 0 and gt[5] == -10, \
'PCRaster geotransform wrong.'
band1 = ds.GetRasterBand(1)
assert band1.GetNoDataValue() == 255, 'PCRaster NODATA value wrong or missing.'
| 33.643836 | 116 | 0.605863 |
01ffbb83743724b757a9147f57c06457be825e62 | 944 | py | Python | rss_downloader/files.py | blade2005/rss-downloader | 5fabedc62e86067e4a95b90db84a4fa324ad314e | [
"Apache-2.0"
] | null | null | null | rss_downloader/files.py | blade2005/rss-downloader | 5fabedc62e86067e4a95b90db84a4fa324ad314e | [
"Apache-2.0"
] | 19 | 2022-02-22T19:42:53.000Z | 2022-03-28T12:26:10.000Z | rss_downloader/files.py | blade2005/rss-downloader | 5fabedc62e86067e4a95b90db84a4fa324ad314e | [
"Apache-2.0"
] | null | null | null | """File related functions for RSS Downloader."""
import argparse
import pathlib
from typing import Generator, List, Tuple
import feedparser
def _output_path(output_dir: str, title: str, href: str) -> pathlib.PurePath:
"""Join path from the base dir, title, and href."""
return pathlib.PurePath(output_dir, title + pathlib.PurePath(href).suffix)
def files_generator(
entries: List[feedparser.util.FeedParserDict], args: argparse.Namespace
) -> Generator[Tuple[pathlib.PurePath, str, str], None, None]:
"""Generate entries to download."""
# import code;code.interact(local={**globals(),**locals()})
for entry in entries:
entry.title
for link in entry.links:
if link.type.startswith("audio"):
yield (
_output_path(args.output_dir, entry.title, link.href),
entry.title,
link.href,
)
# break
| 32.551724 | 78 | 0.630297 |
fa3a4aec8ed68823753a55c4b952048600d10f52 | 2,245 | py | Python | acapy_client/models/dif_proof_request.py | dbluhm/acapy-client | d92ef607ba2ff1152ec15429f2edb20976991424 | [
"Apache-2.0"
] | 4 | 2021-08-05T09:20:34.000Z | 2021-08-08T19:37:29.000Z | acapy_client/models/dif_proof_request.py | dbluhm/acapy-client | d92ef607ba2ff1152ec15429f2edb20976991424 | [
"Apache-2.0"
] | null | null | null | acapy_client/models/dif_proof_request.py | dbluhm/acapy-client | d92ef607ba2ff1152ec15429f2edb20976991424 | [
"Apache-2.0"
] | 2 | 2021-08-12T18:18:45.000Z | 2021-08-14T13:22:28.000Z | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.dif_options import DIFOptions
from ..models.presentation_definition import PresentationDefinition
from ..types import UNSET, Unset
T = TypeVar("T", bound="DIFProofRequest")
@attr.s(auto_attribs=True)
class DIFProofRequest:
    """DIF proof request: a presentation definition plus optional DIF options.

    Unknown keys encountered during deserialisation are preserved in
    ``additional_properties`` so ``to_dict``/``from_dict`` round-trip
    losslessly.
    """
    presentation_definition: PresentationDefinition
    options: Union[Unset, DIFOptions] = UNSET
    # Catch-all for keys not modelled explicitly (populated by from_dict).
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
    def to_dict(self) -> Dict[str, Any]:
        """Serialise to a plain dict, omitting ``options`` when unset."""
        presentation_definition = self.presentation_definition.to_dict()
        options: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.options, Unset):
            options = self.options.to_dict()
        field_dict: Dict[str, Any] = {}
        # Extra properties first so explicit fields win on key collisions.
        field_dict.update(self.additional_properties)
        field_dict.update(
            {
                "presentation_definition": presentation_definition,
            }
        )
        if options is not UNSET:
            field_dict["options"] = options
        return field_dict
    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Deserialise from a dict; leftover keys become additional_properties."""
        d = src_dict.copy()
        presentation_definition = PresentationDefinition.from_dict(d.pop("presentation_definition"))
        _options = d.pop("options", UNSET)
        options: Union[Unset, DIFOptions]
        if isinstance(_options, Unset):
            options = UNSET
        else:
            options = DIFOptions.from_dict(_options)
        dif_proof_request = cls(
            presentation_definition=presentation_definition,
            options=options,
        )
        # Whatever keys remain were not consumed above; keep them verbatim.
        dif_proof_request.additional_properties = d
        return dif_proof_request
    @property
    def additional_keys(self) -> List[str]:
        """Names of the extra (unmodelled) properties."""
        return list(self.additional_properties.keys())
    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]
    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value
    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]
    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| 30.337838 | 100 | 0.656125 |
ebfc39e9737cdae10eaa4074332e93e9e3e112c2 | 56 | py | Python | setup.py | jbalooka/falcon-rest | eafffe6dc6709f8f48b3b3d5afec903b6a3c366d | [
"Apache-2.0"
] | null | null | null | setup.py | jbalooka/falcon-rest | eafffe6dc6709f8f48b3b3d5afec903b6a3c366d | [
"Apache-2.0"
] | null | null | null | setup.py | jbalooka/falcon-rest | eafffe6dc6709f8f48b3b3d5afec903b6a3c366d | [
"Apache-2.0"
] | null | null | null | falcon, falcon-jsonify, falconjsonio, peewee, jsonpickle | 56 | 56 | 0.839286 |
b1fc900b4399142f997d712f12965ab2f61106be | 2,446 | py | Python | summarize_from_feedback/model_layout.py | lumaway/summarize-from-feedback | 065ea4e1607a5822a3d78cc13a8cec63a2919c1b | [
"MIT"
] | 365 | 2020-09-04T15:50:14.000Z | 2022-03-31T04:54:32.000Z | summarize_from_feedback/model_layout.py | ArturTtoptal/summarize-from-feedback | 56b6bb613a1b58a8aa7a5e29266f65c7b980ee48 | [
"CC-BY-4.0"
] | 14 | 2020-09-06T14:52:29.000Z | 2022-02-18T08:05:06.000Z | summarize_from_feedback/model_layout.py | ArturTtoptal/summarize-from-feedback | 56b6bb613a1b58a8aa7a5e29266f65c7b980ee48 | [
"CC-BY-4.0"
] | 60 | 2020-09-04T16:33:48.000Z | 2022-01-28T19:01:42.000Z | import math
import numpy as np
class ModelLayout:
"""Holds the structure of the model and the current rank's position within it"""
@classmethod
def standard(cls, *, total_gpus, my_rank, n_shards=1):
assert my_rank < total_gpus, f"Bad rank {my_rank} for total_gpus = {total_gpus}"
ranks = np.arange(0, total_gpus)
gpus_per_replica = n_shards
assert (
total_gpus % gpus_per_replica == 0
), f"Total GPUs ({total_gpus}) is not divisible by {gpus_per_replica}"
replicas = total_gpus // gpus_per_replica
layout_np = np.reshape(ranks, [replicas, n_shards])
return cls(layout_np, my_rank)
def __eq__(self, other):
if not isinstance(other, ModelLayout):
return False
if self.my_rank != other.my_rank:
return False
return np.array_equal(self.layout, other.layout)
def __hash__(self):
# Best way to hash a numpy array according to stack overflow
# https://stackoverflow.com/a/16592241/610785
return hash((self.layout.tostring(), self.my_rank))
def __init__(self, layout, my_rank):
"""Layout is a numpy array with replica, shard"""
self.layout = layout
self.my_rank = my_rank
self.total_gpus = layout.size
self.all_ranks = list(range(self.total_gpus))
self.n_replicas, self.n_shards = layout.shape
if self.n_shards == 4:
print(
"WARNING: Using n_shards == 4 is currently slow because we have not"
"implemented an efficient ring following the [0,1,3,2] pattern"
)
([replica_idx], [shard_idx]) = np.where(layout == my_rank)
self.replica_idx = int(replica_idx)
self.shard_idx = int(shard_idx)
# Create convenient accessors
self.dp_sibling_ranks = [replica[shard_idx] for replica in layout]
self.mp_sibling_ranks = list(layout[replica_idx])
self.ranks_in_my_replica = layout[replica_idx].flatten().tolist()
self.is_in_first_replica = self.replica_idx == 0
self.replica_root = self.ranks_in_my_replica[0]
self.is_replica_root = self.replica_root == self.my_rank
self.is_logging_rank = self.is_replica_root and self.replica_idx == 0
self.ranks_on_my_node = list(
range(math.floor(self.my_rank / 8) * 8, 8 + math.floor(self.my_rank / 8) * 8)
)
| 33.506849 | 89 | 0.636549 |
66343a2f5710e3c98d4b2358e0b8f66072a9ac72 | 5,381 | py | Python | model/statistics_model.py | trinity-project/trinity-eth | a4e4fff1d1dbc0b422d7acc21ed95a308cf51967 | [
"MIT"
] | 15 | 2018-05-11T06:09:47.000Z | 2020-07-30T05:59:41.000Z | model/statistics_model.py | trinity-project/trinity-eth | a4e4fff1d1dbc0b422d7acc21ed95a308cf51967 | [
"MIT"
] | null | null | null | model/statistics_model.py | trinity-project/trinity-eth | a4e4fff1d1dbc0b422d7acc21ed95a308cf51967 | [
"MIT"
] | 6 | 2018-08-06T19:00:35.000Z | 2020-12-03T02:13:45.000Z | # --*-- coding : utf-8 --*--
"""Author: Trinity Core Team
MIT License
Copyright (c) 2018 Trinity
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
from .manager import DBManager, rpc_response, connection_singleton
from .base_enum import EnumAssetType, EnumChannelState
from common.log import LOG
class TBLStatistics(DBManager):
    """Per-address statistics table (channel lifecycle and transaction counters).

    ``update`` translates high-level events passed as keyword arguments into
    counter deltas on the ``Statistics`` collection.
    """
    def add_one(self, address: str):
        """Insert a zeroed statistics row for a new address."""
        return super(TBLStatistics, self).add(address=address, total_channel=0, opend_channel=0,
                                              settled_channel=0,closed_channel=0,
                                              total_rsmc_transaction=0, rsmc_successed=0,payment=0,income=0,
                                              total_htlc_transaction=0, htlc_successed=0,total_free=0)
    def update(self, address, **kwargs):
        """Translate one event into counter deltas and persist them.

        Exactly one event kind is processed per call, checked in order:
        ``state`` (channel lifecycle), ``rsmc``, ``payment``+``payer``,
        ``htlc_free``, ``htlc_rcode``. The consumed keys are popped and
        replaced by counter increments before delegating to the base class.
        """
        keys = kwargs.keys()
        if 'state' in keys:
            state = kwargs.pop('state', None)
            if state == EnumChannelState.INIT.name:
                kwargs.update({'total_channel': 1})
            elif state == EnumChannelState.OPENED.name:
                kwargs.update({'opend_channel': 1})
            elif state == EnumChannelState.SETTLED.name:
                # Settling also removes the channel from the "opened" count.
                kwargs.update({'settled_channel': 1})
                kwargs.update({'opend_channel': -1})
            elif state == EnumChannelState.CLOSED.name:
                kwargs.update({'closed_channel': 1})
                kwargs.update({'opend_channel': -1})
        elif 'rsmc' in keys:
            kwargs.pop('rsmc', None)
            kwargs.update({'total_rsmc_transaction': 1})
        elif 'payment' in keys and 'payer' in keys:
            payment = kwargs.pop('payment', 0)
            payer = kwargs.pop('payer', False)
            # NOTE(review): popped but never used afterwards — confirm intent.
            is_htlc_to_rsmc = kwargs.pop('is_htlc_to_rsmc', False)
            if payer:
                kwargs.update({'payment': int(payment)})
            else:
                kwargs.update({'income': int(payment)})
            kwargs.update({'rsmc_successed': 1})
        elif 'htlc_free' in keys:
            free = kwargs.pop('htlc_free', 0)
            kwargs.update({'total_htlc_transaction': 1})
            kwargs.update({'total_free': free})
        elif 'htlc_rcode' in keys:
            kwargs.pop('htlc_rcode', True)
            kwargs.update({'htlc_successed': 1})
        return super(TBLStatistics, self).update_one_statistics(address, **kwargs)
    def remove_unsupported_asset(self, asset):
        """Drop entries whose asset type is not a known EnumAssetType member.

        Mutates ``asset`` in place; returns False if iteration fails.
        """
        if not asset:
            return True
        try:
            for asset_type in asset.keys():
                if not self.is_valid_asset_type(asset_type):
                    asset.pop(asset_type)
        except Exception as exp_info:
            LOG.error('Error asset of users. Asset: {}'.format(asset))
            return False
        return True
    @staticmethod
    def is_valid_channel_state(state):
        # Membership test against the EnumChannelState names, case-insensitive.
        return state.upper() in EnumChannelState.__members__
    @staticmethod
    def is_valid_asset_type(asset_type):
        return asset_type.upper() in EnumAssetType.__members__
    @property
    @connection_singleton
    def client(self):
        return super(TBLStatistics, self).client
    @property
    def db_table(self):
        # Backing MongoDB collection.
        return self.client.db.Statistics
    @property
    def primary_key(self):
        return 'address'
    @property
    def required_item(self):
        # NOTE(review): lists 'total_transaction' while add_one creates
        # 'total_rsmc_transaction' — possible inconsistency, confirm.
        return ['address',
                'total_channel', 'opend_channel', 'settled_channel', 'closed_channel',
                'total_transaction', 'rsmc_successed', 'payment', 'income',
                'total_htlc_transaction', 'htlc_successed', 'total_free']
class APIStatistics(object):
    """Module-level facade delegating to one shared TBLStatistics instance."""
    table = TBLStatistics()
    @classmethod
    def add_statistics(cls, address):
        """Create a zeroed statistics row for ``address``."""
        return cls.table.add_one(address)
    @classmethod
    def query_statistics(cls, address, *args, **kwargs):
        """Fetch the statistics row for a single address."""
        return cls.table.query_one(address, *args, **kwargs)
    @classmethod
    def batch_query_statistics(cls, filters, *args, **kwargs):
        """Fetch all statistics rows matching ``filters``."""
        return cls.table.query_many(filters, *args, **kwargs)
    @classmethod
    def update_statistics(cls, address, **kwargs):
        """Apply one statistics event (see TBLStatistics.update) to an address."""
        return cls.table.update(address, **kwargs)
    @classmethod
    def batch_update_statistics(cls, filters, **kwargs):
        """Bulk update rows matching ``filters`` with raw field values."""
        return cls.table.update_many(filters, **kwargs)
fe44484a1c17283af04fa43a93be203f22375d22 | 1,315 | py | Python | maize_detrend_lowess.py | limanqing/crop_climate | bfc50fbf57ce3a96ba7d29de53a76fc7a1dc2d2f | [
"MIT"
] | null | null | null | maize_detrend_lowess.py | limanqing/crop_climate | bfc50fbf57ce3a96ba7d29de53a76fc7a1dc2d2f | [
"MIT"
] | null | null | null | maize_detrend_lowess.py | limanqing/crop_climate | bfc50fbf57ce3a96ba7d29de53a76fc7a1dc2d2f | [
"MIT"
] | null | null | null | #coding=utf-8
import pandas as pd
import statsmodels.api as sm
#import pylab
import glob
def Lowess_detrend(x,y):
# z = sm.nonparametric.lowess(y, x)
# z1 = sm.nonparametric.lowess(y, x, frac=0.1)
# z45 = sm.nonparametric.lowess(y, x, frac=0.45)
z9 = sm.nonparametric.lowess(y, x, frac=0.9)
# pylab.plot(x, y, 'o')
# pylab.plot(z[:,0], z[:,1], 'r-')
# pylab.plot(z1[:,0], z1[:,1], 'g-')
# pylab.plot(z45[:,0], z45[:,1], 'b-')
# pylab.plot(z9[:,0], z9[:,1], 'y-')
# pylab.show()
return z9[:,1]
if __name__ == '__main__':
    # Detrend every per-grid maize yield series with a wide LOWESS fit and
    # write both additive (observed - trend) and multiplicative
    # (observed / trend) residuals to separate output directories.
    base_dir = r'F:\crop-climate\maizecsv\*.csv'
    filelist = glob.glob(base_dir)
    for filename in filelist:
        df = pd.read_csv(filename) # read the yield series with pandas
        grid_id=filename[-10:-4]  # grid id is encoded in the file name
        year_list = df['Year'] # extract the "Year" column
        yield_list = df['Value'] # extract the "Value" (yield) column
        ys=Lowess_detrend(year_list, yield_list)
        dataframe1=pd.DataFrame({'Year':year_list,'Value':yield_list-ys})
        dataframe1.to_csv(r'F:\crop-climate\maize_detrend\lowess-additive/%s.csv' % (grid_id),index=False)
        dataframe2=pd.DataFrame({'Year':year_list,'Value':yield_list/ys})
        dataframe2.to_csv(r'F:\crop-climate\maize_detrend\lowess-multiplicative/%s.csv' % (grid_id),index=False)
4431dcac4b07789aa2a79c12e58b8b19a9bb45d6 | 5,780 | py | Python | PA1/Assignment1/submit.py | csMOOC/UIUC.TR | 6b3692d06e09eacdebe0571862da60392bc2b038 | [
"MIT"
] | null | null | null | PA1/Assignment1/submit.py | csMOOC/UIUC.TR | 6b3692d06e09eacdebe0571862da60392bc2b038 | [
"MIT"
] | null | null | null | PA1/Assignment1/submit.py | csMOOC/UIUC.TR | 6b3692d06e09eacdebe0571862da60392bc2b038 | [
"MIT"
] | null | null | null | import urllib
import urllib2
import hashlib
import random
import email
import email.message
import email.encoders
import StringIO
import sys
""""""""""""""""""""
""""""""""""""""""""
class NullDevice:
def write(self, s):
pass
def submit():
print '==\n== Submitting Solutions \n=='
(login, password) = loginPrompt()
if not login:
print '!! Submission Cancelled'
return
print '\n== Connecting to Coursera ... '
# Part Identifier
(partIdx, sid) = partPrompt()
# Get Challenge
(login, ch, state, ch_aux) = getChallenge(login, sid) #sid is the "part identifier"
if((not login) or (not ch) or (not state)):
# Some error occured, error string in first return element.
print '\n!! Error: %s\n' % login
return
# Attempt Submission with Challenge
ch_resp = challengeResponse(login, password, ch)
(result, string) = submitSolution(login, ch_resp, sid, output(partIdx), \
source(partIdx), state, ch_aux)
print '== %s' % string.strip()
# =========================== LOGIN HELPERS - NO NEED TO CONFIGURE THIS =======================================
def loginPrompt():
  """Prompt the user for login credentials. Returns a tuple (login, password)."""
  # Thin wrapper kept for historical reasons; delegates to basicPrompt.
  (login, password) = basicPrompt()
  return login, password
def basicPrompt():
  """Prompt the user for login credentials. Returns a tuple (login, password).

  Note: the "password" is the per-assignment one-time password, not the
  user's account password.
  """
  login = raw_input('Login (Email address): ')
  password = raw_input('One-time Password (from the assignment page. This is NOT your own account\'s password): ')
  return login, password
def partPrompt():
  """List the submittable parts and ask the user to pick one.

  Returns (partIdx, sid): the 0-based index into the part tables and the
  server-side part identifier.
  """
  print 'Hello! These are the assignment parts that you can submit:'
  counter = 0
  for part in partFriendlyNames:
    counter += 1
    print str(counter) + ') ' + partFriendlyNames[counter - 1]
  # User enters a 1-based choice; convert to 0-based index.
  partIdx = int(raw_input('Please enter which part you want to submit (1-' + str(counter) + '): ')) - 1
  return (partIdx, partIds[partIdx])
def getChallenge(email, sid):
  """Gets the challenge salt from the server. Returns (email,ch,state,ch_aux).

  Posts the email and part id; the server replies with a pipe-delimited
  string. Returns None (printing the raw text) when the reply is malformed.
  """
  url = challenge_url()
  values = {'email_address' : email, 'assignment_part_sid' : sid, 'response_encoding' : 'delim'}
  data = urllib.urlencode(values)
  req = urllib2.Request(url, data)
  response = urllib2.urlopen(req)
  text = response.read().strip()
  # text is of the form email|ch|signature
  splits = text.split('|')
  if(len(splits) != 9):
    print 'Badly formatted challenge response: %s' % text
    return None
  # Fields of interest sit at the odd offsets after their labels.
  return (splits[2], splits[4], splits[6], splits[8])
def challengeResponse(email, passwd, challenge):
  """Return the SHA-1 hex digest of challenge+password.

  The server salts the one-time password with the challenge string; the
  hex digest proves knowledge of the password. ``email`` is unused but
  kept for call-site compatibility.
  """
  sha1 = hashlib.sha1()
  payload = "".join([challenge, passwd])
  # On Python 2 ``str`` is already bytes, so this branch is skipped and
  # behaviour is unchanged; on Python 3 the text must be encoded first.
  if not isinstance(payload, bytes):
    payload = payload.encode('utf-8')
  sha1.update(payload)
  # The original rebuilt the digest character by character into a new
  # string; hexdigest() is already exactly that string.
  return sha1.hexdigest()
def challenge_url():
  """Returns the challenge url (built from the module-level course URL)."""
  return "https://class.coursera.org/" + URL + "/assignment/challenge"
def submit_url():
  """Returns the submission url (built from the module-level course URL)."""
  return "https://class.coursera.org/" + URL + "/assignment/submit"
def submitSolution(email_address, ch_resp, sid, output, source, state, ch_aux):
  """Submits a solution to the server. Returns (result, string).

  ``output`` and ``source`` are base64-encoded via the email package before
  posting. ``result`` is always 0; ``string`` is the server's raw reply.
  """
  # Base64-encode the payloads using email MIME encoders.
  source_64_msg = email.message.Message()
  source_64_msg.set_payload(source)
  email.encoders.encode_base64(source_64_msg)
  output_64_msg = email.message.Message()
  output_64_msg.set_payload(output)
  email.encoders.encode_base64(output_64_msg)
  values = { 'assignment_part_sid' : sid, \
             'email_address' : email_address, \
             'submission' : output_64_msg.get_payload(), \
             'submission_aux' : source_64_msg.get_payload(), \
             'challenge_response' : ch_resp, \
             'state' : state \
           }
  url = submit_url()
  data = urllib.urlencode(values)
  req = urllib2.Request(url, data)
  response = urllib2.urlopen(req)
  string = response.read().strip()
  result = 0
  return result, string
## This collects the source code (just for logging purposes)
def source(partIdx):
  """Return the raw contents of the submission file for the given part.

  Sent alongside the answer purely for server-side logging/auditing.
  """
  # Context manager ensures the handle is closed even if read() fails
  # (the original left the file open on error).
  with open(sourceFiles[partIdx]) as f:
    return f.read()
############ BEGIN ASSIGNMENT SPECIFIC CODE ##############
# Course slug used to build the challenge/submit URLs.
URL = 'textretrieval-001'
# Server-side part identifiers; index-aligned with the two lists below.
partIds = ['UTxjvbrp','3ibilCQk','phynxUM3','WivqChja','TTmvRAb9','h8KcLGd6','i3Eg0sY8','KqynGL9q', '5Nogooq6']
partFriendlyNames = ['Task 1', 'Task 2', 'Task 3', 'Task 4', 'Task 5', 'Task 6', 'Task 7', 'Task 8', 'Bonus']
# Local file submitted for each part (same index as partIds).
sourceFiles = ['doc.stops.txt', 'doc.stems.txt', 'doc.pos-tagged.txt', 'task4.txt', 'task5.txt', 'task6.txt', 'task7.txt', 'task8.txt', 'doc.stopstem.txt']
def output(partIdx):
  """Read and return the submission output for the given part index.

  Part ``partIdx`` (0-based) maps one-to-one onto ``sourceFiles``; the
  original nine-branch if/elif ladder duplicated that table file by file,
  so the lookup now reuses it directly. Unknown indices return an empty
  string, matching the original fall-through behaviour.
  """
  if partIdx < 0 or partIdx >= len(sourceFiles):
    return ''
  # Reading the whole file is equivalent to the original line-by-line
  # concatenation; the context manager also closes the leaked handle.
  with open(sourceFiles[partIdx]) as infile:
    return infile.read()
# Script entry point: runs the interactive submission flow immediately
# (also fires on import — this module is meant to be executed directly).
submit()
| 29.191919 | 179 | 0.631661 |
e1de7e886625e7d82deca3644552eba94a72f718 | 51,981 | py | Python | holoviews/plotting/util.py | TheoMathurin/holoviews | 0defcef994d6dd6d2054f75a0e332d02d121f8b0 | [
"BSD-3-Clause"
] | 864 | 2019-11-13T08:18:27.000Z | 2022-03-31T13:36:13.000Z | holoviews/plotting/util.py | chrinide/holoviews | e1234a60ae0809ac561c204b1998dff0452b2bf0 | [
"BSD-3-Clause"
] | 1,117 | 2019-11-12T16:15:59.000Z | 2022-03-30T22:57:59.000Z | holoviews/plotting/util.py | chrinide/holoviews | e1234a60ae0809ac561c204b1998dff0452b2bf0 | [
"BSD-3-Clause"
] | 180 | 2019-11-19T16:44:44.000Z | 2022-03-28T22:49:18.000Z | import re
import traceback
import warnings
import bisect
from collections import defaultdict, namedtuple
import numpy as np
import param
from ..core import (
HoloMap, DynamicMap, CompositeOverlay, Layout, Overlay, GridSpace,
NdLayout, NdOverlay, AdjointLayout
)
from ..core.options import CallbackError, Cycle
from ..core.operation import Operation
from ..core.ndmapping import item_check
from ..core.spaces import get_nested_streams
from ..core.util import (
match_spec, wrap_tuple, get_overlay_spec, unique_iterator,
closest_match, is_number, isfinite, python2sort, disable_constant,
arraylike_types
)
from ..streams import LinkedStream, Params
from ..util.transform import dim
def displayable(obj):
"""
Predicate that returns whether the object is displayable or not
(i.e. whether the object obeys the nesting hierarchy)
"""
if isinstance(obj, Overlay) and any(isinstance(o, (HoloMap, GridSpace, AdjointLayout))
for o in obj):
return False
if isinstance(obj, HoloMap):
return not (obj.type in [Layout, GridSpace, NdLayout, DynamicMap])
if isinstance(obj, (GridSpace, Layout, NdLayout)):
for el in obj.values():
if not displayable(el):
return False
return True
return True
# Minimal Parameterized subclass used purely as a named source for
# user-facing warnings in param's logging output.
# NOTE: intentionally shadows the builtin ``Warning`` within this module.
class Warning(param.Parameterized): pass
display_warning = Warning(name='Warning')
def collate(obj):
    """Collapse invalidly nested containers into a displayable structure.

    Warns (or raises) depending on the container type, then delegates to
    the object's own ``.collate()``; Layouts additionally have their
    offending HoloMap children collated and re-assembled.
    """
    if isinstance(obj, Overlay):
        # Report the first offending container type found inside the Overlay.
        nested_type = [type(o).__name__ for o in obj
                       if isinstance(o, (HoloMap, GridSpace, AdjointLayout))][0]
        display_warning.param.warning(
            "Nesting %ss within an Overlay makes it difficult to "
            "access your data or control how it appears; we recommend "
            "calling .collate() on the Overlay in order to follow the "
            "recommended nesting structure shown in the Composing Data "
            "user guide (http://goo.gl/2YS8LJ)" % nested_type)
        return obj.collate()
    if isinstance(obj, DynamicMap):
        if obj.type in [DynamicMap, HoloMap]:
            obj_name = obj.type.__name__
            raise Exception("Nesting a %s inside a DynamicMap is not "
                            "supported. Ensure that the DynamicMap callback "
                            "returns an Element or (Nd)Overlay. If you have "
                            "applied an operation ensure it is not dynamic by "
                            "setting dynamic=False." % obj_name)
        return obj.collate()
    if isinstance(obj, HoloMap):
        display_warning.param.warning(
            "Nesting {0}s within a {1} makes it difficult to access "
            "your data or control how it appears; we recommend "
            "calling .collate() on the {1} in order to follow the "
            "recommended nesting structure shown in the Composing "
            "Data user guide (https://goo.gl/2YS8LJ)".format(
                obj.type.__name__, type(obj).__name__))
        return obj.collate()
    elif isinstance(obj, (Layout, NdLayout)):
        try:
            display_warning.param.warning(
                "Layout contains HoloMaps which are not nested in the "
                "recommended format for accessing your data; calling "
                ".collate() on these objects will resolve any violations "
                "of the recommended nesting presented in the Composing Data "
                "tutorial (https://goo.gl/2YS8LJ)")
            expanded = []
            for el in obj.values():
                # Only non-displayable HoloMap children need collating;
                # their collated contents are spliced back into the Layout.
                if isinstance(el, HoloMap) and not displayable(el):
                    collated_layout = Layout(el.collate())
                    expanded.extend(collated_layout.values())
            return Layout(expanded)
        except:
            raise Exception(undisplayable_info(obj))
    else:
        raise Exception(undisplayable_info(obj))
def isoverlay_fn(obj):
    """
    Determines whether object is a DynamicMap returning (Nd)Overlay types.
    """
    if not isinstance(obj, DynamicMap):
        return False
    return isinstance(obj.last, CompositeOverlay)
def overlay_depth(obj):
    """
    Computes the depth of a DynamicMap overlay if it can be determined
    otherwise return None.
    """
    # Non-dynamic objects always count as a single layer.
    if not isinstance(obj, DynamicMap):
        return 1
    last = obj.last
    if isinstance(last, CompositeOverlay):
        return len(last)
    if last is None:
        # Uninitialized DynamicMap: depth is unknown.
        return None
    return 1
def compute_overlayable_zorders(obj, path=[]):
    """
    Traverses an overlayable composite container to determine which
    objects are associated with specific (Nd)Overlay layers by
    z-order, making sure to take DynamicMap Callables into
    account. Returns a mapping between the zorders of each layer and a
    corresponding lists of objects.

    Used to determine which overlaid subplots should be linked with
    Stream callbacks.

    Note: the mutable default ``path=[]`` is safe here because the
    function only rebinds ``path`` (``path = path+[obj]``), never
    mutates it in place.
    """
    path = path+[obj]
    zorder_map = defaultdict(list)

    # Process non-dynamic layers
    if not isinstance(obj, DynamicMap):
        if isinstance(obj, CompositeOverlay):
            for z, o in enumerate(obj):
                zorder_map[z] = [o, obj]
        elif isinstance(obj, HoloMap):
            for el in obj.values():
                if isinstance(el, CompositeOverlay):
                    for k, v in compute_overlayable_zorders(el, path).items():
                        zorder_map[k] += v + [obj]
                else:
                    zorder_map[0] += [obj, el]
        else:
            if obj not in zorder_map[0]:
                zorder_map[0].append(obj)
        return zorder_map

    isoverlay = isinstance(obj.last, CompositeOverlay)
    isdynoverlay = obj.callback._is_overlay
    if obj not in zorder_map[0] and not isoverlay:
        zorder_map[0].append(obj)
    depth = overlay_depth(obj)

    # Process the inputs of the DynamicMap callback
    dmap_inputs = obj.callback.inputs if obj.callback.link_inputs else []
    for z, inp in enumerate(dmap_inputs):
        no_zorder_increment = False
        if any(not (isoverlay_fn(p) or p.last is None) for p in path) and isoverlay_fn(inp):
            # If overlay has been collapsed do not increment zorder
            no_zorder_increment = True

        input_depth = overlay_depth(inp)
        if depth is not None and input_depth is not None and depth < input_depth:
            # Skips branch of graph where the number of elements in an
            # overlay has been reduced but still contains more than one layer
            if depth > 1:
                continue
            else:
                no_zorder_increment = True

        # Recurse into DynamicMap.callback.inputs and update zorder_map
        z = z if isdynoverlay else 0
        deep_zorders = compute_overlayable_zorders(inp, path=path)
        offset = max(zorder_map.keys())
        for dz, objs in deep_zorders.items():
            global_z = offset+z if no_zorder_increment else offset+dz+z
            zorder_map[global_z] = list(unique_iterator(zorder_map[global_z]+objs))

    # If object branches but does not declare inputs (e.g. user defined
    # DynamicMaps returning (Nd)Overlay) add the items on the DynamicMap.last
    found = any(isinstance(p, DynamicMap) and p.callback._is_overlay for p in path)
    linked = any(isinstance(s, (LinkedStream, Params)) and s.linked
                 for s in obj.streams)
    if (found or linked) and isoverlay and not isdynoverlay:
        offset = max(zorder_map.keys())
        for z, o in enumerate(obj.last):
            if isoverlay and linked:
                zorder_map[offset+z].append(obj)
            if o not in zorder_map[offset+z]:
                zorder_map[offset+z].append(o)
    return zorder_map
def is_dynamic_overlay(dmap):
    """
    Traverses a DynamicMap graph and determines if any components
    were overlaid dynamically (i.e. by * on a DynamicMap).
    """
    if not isinstance(dmap, DynamicMap):
        return False
    if dmap.callback._is_overlay:
        return True
    # Recurse into the callback graph looking for a dynamic overlay.
    return any(is_dynamic_overlay(inp) for inp in dmap.callback.inputs)
def split_dmap_overlay(obj, depth=0):
    """
    Splits a DynamicMap into the original component layers it was
    constructed from by traversing the graph to search for dynamically
    overlaid components (i.e. constructed by using * on a DynamicMap).
    Useful for assigning subplots of an OverlayPlot the streams that
    are responsible for driving their updates. Allows the OverlayPlot
    to determine if a stream update should redraw a particular
    subplot.
    """
    layers = []
    if isinstance(obj, DynamicMap):
        initialize_dynamic(obj)
        if issubclass(obj.type, NdOverlay) and not depth:
            # One entry per overlaid layer, all driven by the same DynamicMap.
            layers.extend([obj] * len(obj.last))
        elif issubclass(obj.type, Overlay):
            if obj.callback.inputs and is_dynamic_overlay(obj):
                # Dynamically overlaid: recurse into the inputs that built it.
                for inp in obj.callback.inputs:
                    layers += split_dmap_overlay(inp, depth+1)
            else:
                layers.extend([obj] * len(obj.last))
        else:
            layers.append(obj)
        return layers
    if isinstance(obj, Overlay):
        # Static overlay: each value is its own layer (keys are irrelevant,
        # so iterate values() instead of the original unused-key items()).
        layers.extend(obj.values())
    else:
        layers.append(obj)
    return layers
def initialize_dynamic(obj):
    """
    Initializes all DynamicMap objects contained by the object
    """
    for dmap in obj.traverse(lambda x: x, specs=[DynamicMap]):
        if dmap.unbounded:
            # Unbounded maps cannot be keyed here; plotting code handles them.
            continue
        if not len(dmap):
            # Indexing with the initial key populates the map's cache.
            dmap[dmap._initial_key()]
def get_plot_frame(map_obj, key_map, cached=False):
    """Returns the current frame in a mapping given a key mapping.

    Args:
        obj: Nested Dimensioned object
        key_map: Dictionary mapping between dimensions and key value
        cached: Whether to allow looking up key in cache

    Returns:
        The item in the mapping corresponding to the supplied key,
        or None when the key is missing or evaluation fails.
    """
    if (map_obj.kdims and len(map_obj.kdims) == 1 and map_obj.kdims[0] == 'Frame' and
        not isinstance(map_obj, DynamicMap)):
        # Special handling for static plots
        return map_obj.last
    # Build the key tuple from only those kdims present in key_map.
    key = tuple(key_map[kd.name] for kd in map_obj.kdims if kd.name in key_map)
    if key in map_obj.data and cached:
        return map_obj.data[key]
    else:
        try:
            return map_obj[key]
        except KeyError:
            # Missing key simply means there is no frame to render.
            return None
        except (StopIteration, CallbackError) as e:
            # Deliberate control-flow/callback errors must propagate.
            raise e
        except Exception:
            # Any other callback failure: log the traceback, render nothing.
            print(traceback.format_exc())
            return None
def get_nested_plot_frame(obj, key_map, cached=False):
    """Extracts a single frame from a nested object.

    Replaces any HoloMap or DynamicMap in the nested data structure,
    with the item corresponding to the supplied key.

    Args:
        obj: Nested Dimensioned object
        key_map: Dictionary mapping between dimensions and key value
        cached: Whether to allow looking up key in cache

    Returns:
        Nested datastructure where maps are replaced with single frames
    """
    clone = obj.map(lambda x: x)

    # Ensure that DynamicMaps in the cloned frame have
    # identical callback inputs to allow memoization to work
    for it1, it2 in zip(obj.traverse(lambda x: x), clone.traverse(lambda x: x)):
        if isinstance(it1, DynamicMap):
            with disable_constant(it2.callback):
                it2.callback.inputs = it1.callback.inputs
    # item_check(False) avoids type validation while frames are substituted.
    with item_check(False):
        return clone.map(lambda x: get_plot_frame(x, key_map, cached=cached),
                         [DynamicMap, HoloMap], clone=False)
def undisplayable_info(obj, html=False):
    """Generate helpful message regarding an undisplayable object.

    Args:
        obj: The object that violated the nesting hierarchy.
        html: When True the message is wrapped in HTML markup.

    Returns:
        A plain-text or HTML string describing the problem and remedy.
    """
    collate = '<tt>collate</tt>' if html else 'collate'
    info = "For more information, please consult the Composing Data tutorial (http://git.io/vtIQh)"
    if isinstance(obj, HoloMap):
        error = "HoloMap of %s objects cannot be displayed." % obj.type.__name__
        remedy = "Please call the %s method to generate a displayable object" % collate
    elif isinstance(obj, Layout):
        error = "Layout containing HoloMaps of Layout or GridSpace objects cannot be displayed."
        remedy = "Please call the %s method on the appropriate elements." % collate
    elif isinstance(obj, GridSpace):
        error = "GridSpace containing HoloMaps of Layouts cannot be displayed."
        remedy = "Please call the %s method on the appropriate elements." % collate
    else:
        # Previously any other type fell through with ``error``/``remedy``
        # unbound, raising a NameError instead of a useful message.
        error = "%s object cannot be displayed." % type(obj).__name__
        remedy = "Please call the %s method to generate a displayable object" % collate
    if not html:
        return '\n'.join([error, remedy, info])
    else:
        return "<center>{msg}</center>".format(msg=('<br>'.join(
            ['<b>%s</b>' % error, remedy, '<i>%s</i>' % info])))
def compute_sizes(sizes, size_fn, scaling_factor, scaling_method, base_size):
    """
    Scales point sizes according to a scaling factor,
    base size and size_fn, which will be applied before
    scaling.
    """
    # Only integer or float size arrays can be scaled.
    if sizes.dtype.kind not in ('i', 'f'):
        return None
    if scaling_method == 'width':
        # Sizes express widths, so square the factor to act on area.
        factor = scaling_factor**2
    elif scaling_method == 'area':
        factor = scaling_factor
    else:
        raise ValueError(
            'Invalid value for argument "scaling_method": "{}". '
            'Valid values are: "width", "area".'.format(scaling_method))
    transformed = size_fn(sizes)
    return base_size * factor * transformed
def get_axis_padding(padding):
    """
    Process a padding value supplied as a tuple or number and returns
    padding values for x-, y- and z-axis.
    """
    # A scalar applies uniformly to all three axes.
    if not isinstance(padding, tuple):
        return (padding, padding, padding)
    if len(padding) == 2:
        x, y = padding
        # No z padding specified: default it to 0.
        return (x, y, 0)
    if len(padding) == 3:
        x, y, z = padding
        return (x, y, z)
    raise ValueError('Padding must be supplied as an number applied '
                     'to all axes or a length two or three tuple '
                     'corresponding to the x-, y- and optionally z-axis')
def get_minimum_span(low, high, span):
    """
    If lower and high values are equal ensures they are separated by
    the defined span.
    """
    # Only degenerate (zero-width) numeric ranges need widening.
    if not (is_number(low) and low == high):
        return low, high
    if isinstance(low, np.datetime64):
        # Interpret the span in seconds for datetime values.
        span = span * np.timedelta64(1, 's')
    return low - span, high + span
def get_range(element, ranges, dimension):
    """
    Computes the data, soft- and hard-range along a dimension given
    an element and a dictionary of ranges.
    """
    # Missing or categorical dimensions have no meaningful numeric range.
    if not dimension or dimension == 'categorical':
        nan_range = (np.NaN, np.NaN)
        return nan_range, nan_range, nan_range
    if ranges and dimension.name in ranges:
        entry = ranges[dimension.name]
        return entry['data'], entry['soft'], entry['hard']
    # Fall back to the element's own data range and the dimension's bounds.
    drange = element.range(dimension, dimension_range=False)
    return drange, dimension.soft_range, dimension.range
def get_sideplot_ranges(plot, element, main, ranges):
    """
    Utility to find the range for an adjoined
    plot given the plot, the element, the
    Element the plot is adjoined to and the
    dictionary of ranges.

    Returns a tuple of (range_item, main_range, dim) where range_item
    is the object the range was computed from, main_range is the
    (low, high) range and dim the dimension it applies to.
    """
    key = plot.current_key
    dims = element.dimensions()
    # Prefer the value dimension unless it is a frequency/count axis.
    dim = dims[0] if 'frequency' in dims[1].name or 'count' in dims[1].name else dims[1]
    range_item = main
    if isinstance(main, HoloMap):
        if issubclass(main.type, CompositeOverlay):
            # Pick the overlay layer that actually declares this dimension.
            range_item = [hm for hm in main._split_overlays()[1]
                          if dim in hm.dimensions('all')][0]
    else:
        # Wrap a bare element so range lookups work uniformly.
        range_item = HoloMap({0: main}, kdims=['Frame'])
    ranges = match_spec(range_item.last, ranges)
    if dim.name in ranges:
        main_range = ranges[dim.name]['combined']
    else:
        # Framewise normalization restricts the range to the current frame.
        framewise = plot.lookup_options(range_item.last, 'norm').options.get('framewise')
        if framewise and range_item.get(key, False):
            main_range = range_item[key].range(dim)
        else:
            main_range = range_item.range(dim)

    # If .main is an NdOverlay or a HoloMap of Overlays get the correct style
    if isinstance(range_item, HoloMap):
        range_item = range_item.last
    if isinstance(range_item, CompositeOverlay):
        range_item = [ov for ov in range_item
                      if dim in ov.dimensions('all')][0]
    return range_item, main_range, dim
def within_range(range1, range2):
    """Checks whether range1 is within the range specified by range2."""
    # Non-finite bounds are treated as unbounded (always satisfied).
    lo1, hi1 = [r if isfinite(r) else None for r in range1]
    lo2, hi2 = [r if isfinite(r) else None for r in range2]
    lower_ok = lo1 is None or lo2 is None or lo1 >= lo2
    upper_ok = hi1 is None or hi2 is None or hi1 <= hi2
    return lower_ok and upper_ok
def validate_unbounded_mode(holomaps, dynmaps):
    # Validates that unbounded DynamicMaps can be sampled using the keys
    # supplied by the HoloMaps: their kdims must be a subset of the
    # HoloMap kdims and the HoloMap keys must fall within DynamicMap ranges.
    composite = HoloMap(enumerate(holomaps), kdims=['testing_kdim'])
    holomap_kdims = set(unique_iterator([kd.name for dm in holomaps for kd in dm.kdims]))
    hmranges = {d: composite.range(d) for d in holomap_kdims}
    if any(not set(d.name for d in dm.kdims) <= holomap_kdims
           for dm in dynmaps):
        raise Exception('DynamicMap that are unbounded must have key dimensions that are a '
                        'subset of dimensions of the HoloMap(s) defining the keys.')
    elif not all(within_range(hmrange, dm.range(d)) for dm in dynmaps
                 for d, hmrange in hmranges.items() if d in dm.kdims):
        raise Exception('HoloMap(s) have keys outside the ranges specified on '
                        'the DynamicMap(s).')
def get_dynamic_mode(composite):
    "Returns the common mode of the dynamic maps in given composite object"
    dynmaps = composite.traverse(lambda x: x, [DynamicMap])
    holomaps = composite.traverse(lambda x: x, ['HoloMap'])
    # An unbounded DynamicMap declares no finite key sampling of its own
    dynamic_unbounded = any(m.unbounded for m in dynmaps)
    if holomaps:
        validate_unbounded_mode(holomaps, dynmaps)
    elif dynamic_unbounded and not holomaps:
        raise Exception("DynamicMaps in unbounded mode must be displayed alongside "
                        "a HoloMap to define the sampling.")
    # Returns (purely_dynamic, any_unbounded)
    return dynmaps and not holomaps, dynamic_unbounded
def initialize_unbounded(obj, dimensions, key):
    """
    Initializes any DynamicMaps in unbounded mode.
    """
    # Map each dimension name onto its corresponding key value
    selection = {d.name: v for d, v in zip(dimensions, key)}
    try:
        obj.select(selection_specs=[DynamicMap], **selection)
    except KeyError:
        # Key not available on the object; nothing to initialize
        pass
def dynamic_update(plot, subplot, key, overlay, items):
    """
    Given a plot, subplot and dynamically generated (Nd)Overlay
    find the closest matching Element for that plot.

    Returns a tuple of (index, matched_spec, exact) where index is the
    position in items (or None), matched_spec the spec that matched and
    exact whether the match was an exact spec match.
    """
    match_spec = get_overlay_spec(overlay,
                                  wrap_tuple(key),
                                  subplot.current_frame)
    specs = [(i, get_overlay_spec(overlay, wrap_tuple(k), el))
             for i, (k, el) in enumerate(items)]
    # closest_match returns the index of the best matching spec or None
    closest = closest_match(match_spec, specs)
    if closest is None:
        return closest, None, False
    matched = specs[closest][1]
    return closest, matched, match_spec == matched
def map_colors(arr, crange, cmap, hex=True):
    """
    Maps an array of values to RGB hex strings, given
    a color range and colormap.
    """
    if isinstance(crange, arraylike_types):
        # Explicit bin edges supplied: map each value to its bin index
        order = np.argsort(crange)
        positions = np.searchsorted(crange, arr)
        arr = order[positions]
    else:
        # Normalize values into the unit interval, masking non-finite entries
        if isinstance(crange, tuple):
            cmin, cmax = crange
        else:
            cmin, cmax = np.nanmin(arr), np.nanmax(arr)
        arr = (arr - cmin) / (cmax - cmin)
        arr = np.ma.array(arr, mask=np.logical_not(np.isfinite(arr)))
    arr = cmap(arr)
    return rgb2hex(arr) if hex else arr
def resample_palette(palette, ncolors, categorical, cmap_categorical):
    """
    Resample the number of colors in a palette to the selected number.
    """
    if len(palette) == ncolors:
        return palette
    if categorical and cmap_categorical:
        # Categorical palettes are cycled rather than interpolated
        return [palette[i % len(palette)] for i in range(ncolors)]
    # Sample evenly spaced positions, padded so end colors keep full weight
    lpad, rpad = -0.5, 0.49999999999
    positions = np.linspace(lpad, (len(palette) - 1) + rpad, ncolors)
    return [palette[int(np.round(pos))] for pos in positions]
def mplcmap_to_palette(cmap, ncolors=None, categorical=False):
    """
    Converts a matplotlib colormap to palette of RGB hex strings.
    """
    from matplotlib.colors import Colormap, ListedColormap
    ncolors = ncolors or 256
    if not isinstance(cmap, Colormap):
        import matplotlib.cm as cm
        # Alias bokeh Category cmaps with mpl tab cmaps
        if cmap.startswith('Category'):
            cmap = cmap.replace('Category', 'tab')
        try:
            cmap = cm.get_cmap(cmap)
        except:
            # Retry with a lowercased name before giving up
            cmap = cm.get_cmap(cmap.lower())
    if isinstance(cmap, ListedColormap):
        if categorical:
            # Cycle through the listed colors instead of interpolating
            palette = [rgb2hex(cmap.colors[i%cmap.N]) for i in range(ncolors)]
            return palette
        elif cmap.N > ncolors:
            # Downsample the listed colors to the requested number
            palette = [rgb2hex(c) for c in cmap(np.arange(cmap.N))]
            if len(palette) != ncolors:
                palette = [palette[int(v)] for v in np.linspace(0, len(palette)-1, ncolors)]
            return palette
    # Continuous colormap (or small listed map): sample uniformly
    return [rgb2hex(c) for c in cmap(np.linspace(0, 1, ncolors))]
def colorcet_cmap_to_palette(cmap, ncolors=None, categorical=False):
    """
    Look up a colorcet palette by name and resample it to ncolors,
    handling reversed ('_r') names.
    """
    from colorcet import palette
    ncolors = ncolors or 256
    # Only the glasbey families are categorical colorcet maps
    cmap_categorical = any(cat in cmap for cat in ['glasbey'])
    if cmap.endswith('_r'):
        colors = list(reversed(palette[cmap[:-2]]))
    else:
        colors = palette[cmap]
    return resample_palette(colors, ncolors, categorical, cmap_categorical)
def bokeh_palette_to_palette(cmap, ncolors=None, categorical=False):
    """
    Look up a bokeh palette by name and return a list of colors,
    handling reversed ('_r') names, categorical palettes and
    resampling to the requested number of colors.
    """
    from bokeh import palettes
    # Handle categorical colormaps to avoid interpolation
    categories = ['accent', 'category', 'dark', 'colorblind', 'pastel',
                  'set1', 'set2', 'set3', 'paired']
    cmap_categorical = any(cat in cmap.lower() for cat in categories)
    reverse = False
    if cmap.endswith('_r'):
        cmap = cmap[:-2]
        reverse = True
    # Some colormaps are inverted compared to matplotlib
    inverted = (not cmap_categorical and not cmap.capitalize() in palettes.mpl
                and not cmap.startswith('fire'))
    if inverted:
        reverse=not reverse
    ncolors = ncolors or 256
    # Alias mpl tab cmaps with bokeh Category cmaps
    if cmap.startswith('tab'):
        cmap = cmap.replace('tab', 'Category')
    # Process as bokeh palette
    if cmap in palettes.all_palettes:
        palette = palettes.all_palettes[cmap]
    else:
        # Fall back to attribute lookup, trying a capitalized variant too
        palette = getattr(palettes, cmap, getattr(palettes, cmap.capitalize(), None))
    if palette is None:
        raise ValueError("Supplied palette %s not found among bokeh palettes" % cmap)
    elif isinstance(palette, dict) and (cmap in palette or cmap.capitalize() in palette):
        # Some bokeh palettes are doubly nested
        palette = palette.get(cmap, palette.get(cmap.capitalize()))
    if isinstance(palette, dict):
        # Dict of palettes keyed by size: take the largest available
        palette = palette[max(palette)]
        if not cmap_categorical:
            if len(palette) < ncolors:
                palette = polylinear_gradient(palette, ncolors)
    elif callable(palette):
        palette = palette(ncolors)
    if reverse: palette = palette[::-1]
    return list(resample_palette(palette, ncolors, categorical, cmap_categorical))
def linear_gradient(start_hex, finish_hex, n=10):
    """
    Interpolates the color gradient between two hex colors,
    returning n hex colors including both endpoints.
    """
    start = hex2rgb(start_hex)
    finish = hex2rgb(finish_hex)
    gradient = [start]
    for step in range(1, n):
        # Linear interpolation of each channel, truncated to an int
        frac = float(step) / (n - 1)
        gradient.append([int(start[ch] + frac * (finish[ch] - start[ch]))
                         for ch in range(3)])
    return [rgb2hex([chan / 255. for chan in rgb]) for rgb in gradient]
def polylinear_gradient(colors, n):
    """
    Interpolates the color gradients between a list of hex colors.
    """
    segments = len(colors) - 1
    per_segment = int(float(n) / segments)
    gradient = linear_gradient(colors[0], colors[1], per_segment)
    if len(colors) == len(gradient):
        return gradient
    for idx in range(1, segments):
        # Drop the first color of each subsequent segment to avoid
        # duplicating the shared endpoint
        segment = linear_gradient(colors[idx], colors[idx + 1], per_segment + 1)
        gradient += segment[1:] if len(segment) > 1 else segment
    return gradient
# Registry of colormap metadata records, kept sorted and populated
# via register_cmaps below
cmap_info=[]
# Record type describing a colormap's provenance and intended usage
CMapInfo=namedtuple('CMapInfo',['name','provider','category','source','bg'])
# Supported colormap providers
providers = ['matplotlib', 'bokeh', 'colorcet']
def _list_cmaps(provider=None, records=False):
    """
    List available colormaps by combining matplotlib, bokeh, and
    colorcet colormaps or palettes if available. May also be
    narrowed down to a particular provider or list of providers.

    When records=True, returns CMapInfo records; otherwise plain names.
    Providers that fail to import are silently skipped.
    """
    if provider is None:
        provider = providers
    elif isinstance(provider, str):
        if provider not in providers:
            raise ValueError('Colormap provider %r not recognized, must '
                             'be one of %r' % (provider, providers))
        provider = [provider]
    cmaps = []
    # Wrap names as CMapInfo records when requested, else return plain names
    def info(provider,names):
        return [CMapInfo(name=n,provider=provider,category=None,source=None,bg=None) for n in names] \
               if records else list(names)
    if 'matplotlib' in provider:
        try:
            import matplotlib.cm as cm
            # Newer matplotlib exposes a registry; fall back to legacy dicts
            if hasattr(cm, '_cmap_registry'):
                mpl_cmaps = list(cm._cmap_registry)
            else:
                mpl_cmaps = list(cm.cmaps_listed)+list(cm.datad)
            cmaps += info('matplotlib', mpl_cmaps)
            # Also advertise reversed variants of all non-reversed maps
            cmaps += info('matplotlib', [cmap+'_r' for cmap in mpl_cmaps
                                         if not cmap.endswith('_r')])
        except:
            pass
    if 'bokeh' in provider:
        try:
            from bokeh import palettes
            cmaps += info('bokeh', palettes.all_palettes)
            cmaps += info('bokeh', [p+'_r' for p in palettes.all_palettes
                                    if not p.endswith('_r')])
        except:
            pass
    if 'colorcet' in provider:
        try:
            from colorcet import palette_n, glasbey_hv
            cet_maps = palette_n.copy()
            cet_maps['glasbey_hv'] = glasbey_hv # Add special hv-specific map
            cmaps += info('colorcet', cet_maps)
            cmaps += info('colorcet', [p+'_r' for p in cet_maps if not p.endswith('_r')])
        except:
            pass
    return sorted(unique_iterator(cmaps))
def register_cmaps(category, provider, source, bg, names):
    """
    Maintain descriptions of colormaps that include the following information:
       name     - string name for the colormap
       category - intended use or purpose, mostly following matplotlib
       provider - package providing the colormap directly
       source   - original source or creator of the colormaps
       bg       - base/background color expected for the map
                  ('light','dark','medium','any' (unknown or N/A))
    """
    for cmap_name in names:
        record = CMapInfo(name=cmap_name, provider=provider,
                          category=category, source=source, bg=bg)
        # Insert in sorted order so listings stay deterministic
        bisect.insort(cmap_info, record)
def list_cmaps(provider=None, records=False, name=None, category=None, source=None,
               bg=None, reverse=None):
    """
    Return colormap names matching the specified filters.

    Filters on the metadata stored in cmap_info (name substring, provider,
    category, source, background) and on whether the name is a reversed
    ('_r') variant. When records=True returns CMapInfo records sorted by
    category, otherwise case-insensitively sorted names.
    """
    # Only uses names actually imported and currently available
    available = _list_cmaps(provider=provider, records=True)
    matches = set()
    for avail in available:
        aname=avail.name
        matched=False
        basename=aname[:-2] if aname.endswith('_r') else aname
        # Apply the reverse filter before consulting the metadata registry
        if (reverse is None or
            (reverse==True and aname.endswith('_r')) or
            (reverse==False and not aname.endswith('_r'))):
            for r in cmap_info:
                if (r.name==basename):
                    matched=True
                    # cmap_info stores only non-reversed info, so construct
                    # suitable values for reversed version if appropriate
                    r=r._replace(name=aname)
                    if aname.endswith('_r') and (r.category != 'Diverging'):
                        if r.bg=='light':
                            r=r._replace(bg='dark')
                        elif r.bg=='dark':
                            r=r._replace(bg='light')
                    # Substring matching against every supplied filter
                    if ((    name is None or     name in r.name) and
                        (provider is None or provider in r.provider) and
                        (category is None or category in r.category) and
                        (  source is None or   source in r.source) and
                        (      bg is None or       bg in r.bg)):
                        matches.add(r)
            if not matched and (category is None or category=='Miscellaneous'):
                # Return colormaps that exist but are not found in cmap_info
                # under the 'Miscellaneous' category, with no source or bg
                r = CMapInfo(aname,provider=avail.provider,category='Miscellaneous',source=None,bg=None)
                matches.add(r)
    # Return results sorted by category if category information is provided
    if records:
        return list(unique_iterator(python2sort(matches,
                   key=lambda r: (r.category.split(" ")[-1],r.bg,r.name.lower(),r.provider,r.source))))
    else:
        return list(unique_iterator(sorted([rec.name for rec in matches], key=lambda n:n.lower())))
# Populate the cmap_info registry with metadata for the colormaps
# shipped by matplotlib, colorcet and bokeh.
register_cmaps('Uniform Sequential', 'matplotlib', 'bids', 'dark',
               ['viridis', 'plasma', 'inferno', 'magma', 'cividis'])
register_cmaps('Mono Sequential', 'matplotlib', 'colorbrewer', 'light',
               ['Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
                'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
                'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn'])
register_cmaps('Other Sequential', 'matplotlib', 'misc', 'light',
               ['gist_yarg', 'binary'])
register_cmaps('Other Sequential', 'matplotlib', 'misc', 'dark',
               ['afmhot', 'gray', 'bone', 'gist_gray', 'gist_heat',
                'hot', 'pink'])
register_cmaps('Other Sequential', 'matplotlib', 'misc', 'any',
               ['copper', 'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia'])
register_cmaps('Diverging', 'matplotlib', 'colorbrewer', 'light',
               ['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy',
                'RdYlBu', 'RdYlGn', 'Spectral'])
register_cmaps('Diverging', 'matplotlib', 'misc', 'light',
               ['coolwarm', 'bwr', 'seismic'])
register_cmaps('Categorical', 'matplotlib', 'colorbrewer', 'any',
               ['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2',
                'Set1', 'Set2', 'Set3'])
register_cmaps('Categorical', 'matplotlib', 'd3', 'any',
               ['tab10', 'tab20', 'tab20b', 'tab20c'])
register_cmaps('Rainbow', 'matplotlib', 'misc', 'dark',
               ['nipy_spectral', 'gist_ncar'])
register_cmaps('Rainbow', 'matplotlib', 'misc', 'any',
               ['brg', 'hsv', 'gist_rainbow', 'rainbow', 'jet'])
register_cmaps('Miscellaneous', 'matplotlib', 'misc', 'dark',
               ['CMRmap', 'cubehelix', 'gist_earth', 'gist_stern',
                'gnuplot', 'gnuplot2', 'ocean', 'terrain'])
register_cmaps('Miscellaneous', 'matplotlib', 'misc', 'any',
               ['flag', 'prism'])
# colorcet (perceptually uniform) colormaps
register_cmaps('Uniform Sequential', 'colorcet', 'cet', 'dark',
               ['bgyw', 'bgy', 'kbc', 'bmw', 'bmy', 'kgy', 'gray',
                'dimgray', 'fire'])
register_cmaps('Uniform Sequential', 'colorcet', 'cet', 'any',
               ['blues', 'kr', 'kg', 'kb'])
register_cmaps('Uniform Diverging', 'colorcet', 'cet', 'light',
               ['coolwarm', 'gwv', 'bwy', 'cwr'])
register_cmaps('Uniform Diverging', 'colorcet', 'cet', 'dark',
               ['bkr', 'bky'])
register_cmaps('Uniform Diverging', 'colorcet', 'cet', 'medium',
               ['bjy'])
register_cmaps('Uniform Rainbow', 'colorcet', 'cet', 'any',
               ['rainbow', 'colorwheel','isolum'])
# bokeh palettes
register_cmaps('Uniform Sequential', 'bokeh', 'bids', 'dark',
               ['Viridis', 'Plasma', 'Inferno', 'Magma'])
register_cmaps('Mono Sequential', 'bokeh', 'colorbrewer', 'light',
               ['Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys',
                'OrRd', 'Oranges', 'PuBu', 'PuBuGn', 'PuRd', 'Purples',
                'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd'])
register_cmaps('Diverging', 'bokeh', 'colorbrewer', 'light',
               ['BrBG', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy',
                'RdYlBu', 'RdYlGn', 'Spectral'])
register_cmaps('Categorical', 'bokeh', 'd3', 'any',
               ['Category10', 'Category20', 'Category20b', 'Category20c'])
register_cmaps('Categorical', 'bokeh', 'colorbrewer', 'any',
               ['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2',
                'Set1', 'Set2', 'Set3'])
register_cmaps('Categorical', 'bokeh', 'misc', 'any',
               ['Colorblind'])
register_cmaps('Uniform Categorical', 'colorcet', 'cet', 'any',
               ['glasbey', 'glasbey_cool', 'glasbey_warm', 'glasbey_hv'])
register_cmaps('Uniform Categorical', 'colorcet', 'cet', 'dark',
               ['glasbey_light'])
register_cmaps('Uniform Categorical', 'colorcet', 'cet', 'light',
               ['glasbey_dark'])
def process_cmap(cmap, ncolors=None, provider=None, categorical=False):
    """
    Convert valid colormap specifications to a list of colors.

    Accepts a Cycle, a tuple/list of colors or a named colormap string,
    dispatching string names to the matplotlib, bokeh or colorcet
    converters. Raises ValueError for unknown names and TypeError for
    unsupported cmap types.
    """
    providers_checked="matplotlib, bokeh, or colorcet" if provider is None else provider
    if isinstance(cmap, Cycle):
        palette = [rgb2hex(c) if isinstance(c, tuple) else c for c in cmap.values]
    elif isinstance(cmap, tuple):
        palette = list(cmap)
    elif isinstance(cmap, list):
        palette = cmap
    elif isinstance(cmap, str):
        mpl_cmaps = _list_cmaps('matplotlib')
        bk_cmaps = _list_cmaps('bokeh')
        cet_cmaps = _list_cmaps('colorcet')
        # Dispatch to the provider whose namespace contains the name;
        # an explicit provider argument overrides the lookup
        if provider == 'matplotlib' or (provider is None and (cmap in mpl_cmaps or cmap.lower() in mpl_cmaps)):
            palette = mplcmap_to_palette(cmap, ncolors, categorical)
        elif provider == 'bokeh' or (provider is None and (cmap in bk_cmaps or cmap.capitalize() in bk_cmaps)):
            palette = bokeh_palette_to_palette(cmap, ncolors, categorical)
        elif provider == 'colorcet' or (provider is None and cmap in cet_cmaps):
            palette = colorcet_cmap_to_palette(cmap, ncolors, categorical)
        else:
            raise ValueError("Supplied cmap %s not found among %s colormaps." %
                             (cmap,providers_checked))
    else:
        try:
            # Try processing as matplotlib colormap
            palette = mplcmap_to_palette(cmap, ncolors)
        except:
            palette = None
    if not isinstance(palette, list):
        raise TypeError("cmap argument %s expects a list, Cycle or valid %s colormap or palette."
                        % (cmap,providers_checked))
    if ncolors and len(palette) != ncolors:
        # Cycle the palette to pad or trim it to the requested length
        return [palette[i%len(palette)] for i in range(ncolors)]
    return palette
def color_intervals(colors, levels, clip=None, N=255):
    """
    Maps the supplied colors into bins defined by the supplied levels.
    If a clip tuple is defined the bins are clipped to the defined
    range otherwise the range is computed from the levels and returned.

    Arguments
    ---------
    colors: list
      List of colors (usually hex string or named colors)
    levels: list or array_like
      Levels specifying the bins to map the colors to
    clip: tuple (optional)
      Lower and upper limits of the color range
    N: int
      Number of discrete colors to map the range onto

    Returns
    -------
    cmap: list
      List of colors
    clip: tuple
      Lower and upper bounds of the color range
    """
    if len(colors) != len(levels)-1:
        # There must be exactly one color per interval between levels.
        # Bug fix: the message previously reported N (the number of
        # discrete output colors) as the expected count instead of the
        # actual required number of interval colors, len(levels)-1.
        raise ValueError('The number of colors in the colormap '
                         'must match the intervals defined in the '
                         'color_levels, expected %d colors found %d.'
                         % (len(levels)-1, len(colors)))
    intervals = np.diff(levels)
    cmin, cmax = min(levels), max(levels)
    interval = cmax-cmin
    cmap = []
    # Repeat each color proportionally to the fraction of the total
    # range its interval covers
    for intv, c in zip(intervals, colors):
        cmap += [c]*int(round(N*(intv/interval)))
    if clip is not None:
        clmin, clmax = clip
        lidx = int(round(N*((clmin-cmin)/interval)))
        uidx = int(round(N*((cmax-clmax)/interval)))
        uidx = N-uidx
        if lidx == uidx:
            # Ensure at least one color remains after clipping
            uidx = lidx+1
        cmap = cmap[lidx:uidx]
        if clmin == clmax:
            # Degenerate clip range: snap to the two nearest levels
            idx = np.argmin(np.abs(np.array(levels)-clmin))
            clip = levels[idx: idx+2] if len(levels) > idx+2 else levels[idx-1: idx+1]
    return cmap, clip
def dim_axis_label(dimensions, separator=', '):
    """
    Returns an axis label for one or more dimensions.
    """
    dims = dimensions if isinstance(dimensions, list) else [dimensions]
    # Join the pretty-printed labels of all dimensions
    return separator.join(d.pprint_label for d in dims)
def scale_fontsize(size, scaling):
    """
    Scales a numeric or string font size.

    String sizes such as '12pt' are scaled numerically and the unit
    suffix reattached; strings with no leading number are returned
    unchanged.
    """
    suffix = None
    if isinstance(size, str):
        numeric = re.match(r"[-+]?\d*\.\d+|\d+", size)
        if numeric is None:
            # No leading number (e.g. 'large'): nothing to scale
            return size
        value = numeric.group()
        suffix = size.replace(value, '')
        size = float(value)
    if scaling:
        size = size * scaling
    if suffix is not None:
        # Format without trailing zeros and reattach the unit suffix
        size = ('%.3f' % size).rstrip('0').rstrip('.') + suffix
    return size
def attach_streams(plot, obj, precedence=1.1):
    """
    Attaches plot refresh to all streams on the object.
    """
    def append_refresh(dmap):
        for stream in get_nested_streams(dmap):
            # Avoid subscribing the same refresh callback twice
            if plot.refresh not in stream._subscribers:
                stream.add_subscriber(plot.refresh, precedence)
    return obj.traverse(append_refresh, [DynamicMap])
def traverse_setter(obj, attribute, value):
    """
    Traverses the object and sets the supplied attribute on the
    object. Supports Dimensioned and DimensionedPlot types.
    """
    def set_attribute(node):
        setattr(node, attribute, value)
    obj.traverse(set_attribute)
def _get_min_distance_numpy(element):
    """
    NumPy based implementation of get_min_distance
    """
    points = element.array([0, 1])
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', r'invalid value encountered in')
        # Pack (x, y) pairs into complex numbers so np.abs yields
        # pairwise Euclidean distances via broadcasting
        pts = points.astype('float32').view(np.complex64)
        dist_matrix = np.abs(pts.T - pts)
        np.fill_diagonal(dist_matrix, np.inf)
        positive = dist_matrix[dist_matrix > 0]
    if len(positive):
        return positive.min()
    return 0
def get_min_distance(element):
    """
    Gets the minimum sampling distance of the x- and y-coordinates
    in a grid.
    """
    try:
        # Prefer the optimized scipy implementation when available
        from scipy.spatial.distance import pdist
        return pdist(element.array([0, 1])).min()
    except:
        # Fall back to the pure NumPy implementation
        return _get_min_distance_numpy(element)
def get_directed_graph_paths(element, arrow_length):
    """
    Computes paths for a directed path which include an arrow to
    indicate the directionality of each edge.
    """
    edgepaths = element._split_edgepaths
    edges = edgepaths.split(datatype='array', dimensions=edgepaths.kdims)
    paths = []
    for edge in edges:
        (x0, y0), (x1, y1) = edge[0], edge[1]
        angle = np.arctan2(y1 - y0, x1 - x0)
        # Two short strokes angled pi/8 either side of the edge direction,
        # anchored at the target endpoint
        left_x = x1 - np.cos(angle + np.pi/8) * arrow_length
        left_y = y1 - np.sin(angle + np.pi/8) * arrow_length
        right_x = x1 - np.cos(angle - np.pi/8) * arrow_length
        right_y = y1 - np.sin(angle - np.pi/8) * arrow_length
        # NaN row separates the edge segment from the arrow-head segment
        paths.append(np.array([(x0, y0), (x1, y1), (np.nan, np.nan),
                               (left_x, left_y), (x1, y1), (right_x, right_y)]))
    return paths
def rgb2hex(rgb):
    """
    Convert RGB(A) tuple to hex.
    """
    if len(rgb) > 3:
        # Drop the alpha channel
        rgb = rgb[:-1]
    r, g, b = (int(channel * 255) for channel in rgb)
    return "#%02x%02x%02x" % (r, g, b)
def dim_range_key(eldim):
    """
    Returns the key to look up a dimension range.
    """
    if not isinstance(eldim, dim):
        return eldim.name
    # dim expressions are identified by their repr, stripped of the
    # surrounding dim('...') wrapper when present
    name = repr(eldim)
    if name.startswith("dim('") and name.endswith("')"):
        name = name[5:-2]
    return name
def hex2rgb(hex):
    ''' "#FFFFFF" -> [255,255,255] '''
    # Each channel is two hex digits following the leading '#'
    return [int(hex[pos:pos + 2], 16) for pos in (1, 3, 5)]
class apply_nodata(Operation):
    """
    Operation replacing a `nodata` sentinel value in integer elements
    with NaN so the missing data renders as transparent.
    """
    nodata = param.Integer(default=None, doc="""
        Optional missing-data value for integer data.
        If non-None, data with this value will be replaced with NaN so
        that it is transparent (by default) when plotted.""")
    def _replace_value(self, data):
        "Replace `nodata` value in data with NaN, if specified in opts"
        # Cast to float first since integer arrays cannot hold NaN
        data = data.astype('float64')
        mask = data!=self.p.nodata
        if hasattr(data, 'where'):
            # xarray/pandas-like containers provide their own where method
            return data.where(mask, np.NaN)
        return np.where(mask, data, np.NaN)
    def _process(self, element, key=None):
        # Only integer/unsigned data can carry a nodata sentinel;
        # everything else passes through untouched
        if self.p.nodata is None:
            return element
        if hasattr(element, 'interface'):
            vdim = element.vdims[0]
            dtype = element.interface.dtype(element, vdim)
            if dtype.kind not in 'iu':
                return element
            transform = dim(vdim, self._replace_value)
            return element.transform(**{vdim.name: transform})
        else:
            # Raster-like element without an interface: operate on the array
            array = element.dimension_values(2, flat=False).T
            if array.dtype.kind not in 'iu':
                return element
            array = array.astype('float64')
            return element.clone(self._replace_value(array))
# Matches 3- or 6-digit hex color strings such as '#fff' or '#ffffff'
RGB_HEX_REGEX = re.compile(r'^#(?:[0-9a-fA-F]{3}){1,2}$')
# Single-letter (matplotlib-style) and named color aliases mapped to
# RGB(A) tuples with channel values in the unit interval
COLOR_ALIASES = {
    'b': (0, 0, 1),
    'c': (0, 0.75, 0.75),
    'g': (0, 0.5, 0),
    'k': (0, 0, 0),
    'm': (0.75, 0, 0.75),
    'r': (1, 0, 0),
    'w': (1, 1, 1),
    'y': (0.75, 0.75, 0),
    'transparent': (0, 0, 0, 0)
}
# linear_kryw_0_100_c71 (aka "fire"):
# A perceptually uniform equivalent of matplotlib's "hot" colormap, from
# http://peterkovesi.com/projects/colourmaps
fire_colors = linear_kryw_0_100_c71 = [\
[0, 0, 0 ], [0.027065, 2.143e-05, 0 ],
[0.052054, 7.4728e-05, 0 ], [0.071511, 0.00013914, 0 ],
[0.08742, 0.0002088, 0 ], [0.10109, 0.00028141, 0 ],
[0.11337, 0.000356, 2.4266e-17], [0.12439, 0.00043134, 3.3615e-17],
[0.13463, 0.00050796, 2.1604e-17], [0.14411, 0.0005856, 0 ],
[0.15292, 0.00070304, 0 ], [0.16073, 0.0013432, 0 ],
[0.16871, 0.0014516, 0 ], [0.17657, 0.0012408, 0 ],
[0.18364, 0.0015336, 0 ], [0.19052, 0.0017515, 0 ],
[0.19751, 0.0015146, 0 ], [0.20401, 0.0015249, 0 ],
[0.20994, 0.0019639, 0 ], [0.21605, 0.002031, 0 ],
[0.22215, 0.0017559, 0 ], [0.22808, 0.001546, 1.8755e-05],
[0.23378, 0.0016315, 3.5012e-05], [0.23955, 0.0017194, 3.3352e-05],
[0.24531, 0.0018097, 1.8559e-05], [0.25113, 0.0019038, 1.9139e-05],
[0.25694, 0.0020015, 3.5308e-05], [0.26278, 0.0021017, 3.2613e-05],
[0.26864, 0.0022048, 2.0338e-05], [0.27451, 0.0023119, 2.2453e-05],
[0.28041, 0.0024227, 3.6003e-05], [0.28633, 0.0025363, 2.9817e-05],
[0.29229, 0.0026532, 1.9559e-05], [0.29824, 0.0027747, 2.7666e-05],
[0.30423, 0.0028999, 3.5752e-05], [0.31026, 0.0030279, 2.3231e-05],
[0.31628, 0.0031599, 1.2902e-05], [0.32232, 0.0032974, 3.2915e-05],
[0.32838, 0.0034379, 3.2803e-05], [0.33447, 0.0035819, 2.0757e-05],
[0.34057, 0.003731, 2.3831e-05], [0.34668, 0.0038848, 3.502e-05 ],
[0.35283, 0.0040418, 2.4468e-05], [0.35897, 0.0042032, 1.1444e-05],
[0.36515, 0.0043708, 3.2793e-05], [0.37134, 0.0045418, 3.012e-05 ],
[0.37756, 0.0047169, 1.4846e-05], [0.38379, 0.0048986, 2.796e-05 ],
[0.39003, 0.0050848, 3.2782e-05], [0.3963, 0.0052751, 1.9244e-05],
[0.40258, 0.0054715, 2.2667e-05], [0.40888, 0.0056736, 3.3223e-05],
[0.41519, 0.0058798, 2.159e-05 ], [0.42152, 0.0060922, 1.8214e-05],
[0.42788, 0.0063116, 3.2525e-05], [0.43424, 0.0065353, 2.2247e-05],
[0.44062, 0.006765, 1.5852e-05], [0.44702, 0.0070024, 3.1769e-05],
[0.45344, 0.0072442, 2.1245e-05], [0.45987, 0.0074929, 1.5726e-05],
[0.46631, 0.0077499, 3.0976e-05], [0.47277, 0.0080108, 1.8722e-05],
[0.47926, 0.0082789, 1.9285e-05], [0.48574, 0.0085553, 3.0063e-05],
[0.49225, 0.0088392, 1.4313e-05], [0.49878, 0.0091356, 2.3404e-05],
[0.50531, 0.0094374, 2.8099e-05], [0.51187, 0.0097365, 6.4695e-06],
[0.51844, 0.010039, 2.5791e-05], [0.52501, 0.010354, 2.4393e-05],
[0.53162, 0.010689, 1.6037e-05], [0.53825, 0.011031, 2.7295e-05],
[0.54489, 0.011393, 1.5848e-05], [0.55154, 0.011789, 2.3111e-05],
[0.55818, 0.012159, 2.5416e-05], [0.56485, 0.012508, 1.5064e-05],
[0.57154, 0.012881, 2.541e-05 ], [0.57823, 0.013283, 1.6166e-05],
[0.58494, 0.013701, 2.263e-05 ], [0.59166, 0.014122, 2.3316e-05],
[0.59839, 0.014551, 1.9432e-05], [0.60514, 0.014994, 2.4323e-05],
[0.6119, 0.01545, 1.3929e-05], [0.61868, 0.01592, 2.1615e-05],
[0.62546, 0.016401, 1.5846e-05], [0.63226, 0.016897, 2.0838e-05],
[0.63907, 0.017407, 1.9549e-05], [0.64589, 0.017931, 2.0961e-05],
[0.65273, 0.018471, 2.0737e-05], [0.65958, 0.019026, 2.0621e-05],
[0.66644, 0.019598, 2.0675e-05], [0.67332, 0.020187, 2.0301e-05],
[0.68019, 0.020793, 2.0029e-05], [0.68709, 0.021418, 2.0088e-05],
[0.69399, 0.022062, 1.9102e-05], [0.70092, 0.022727, 1.9662e-05],
[0.70784, 0.023412, 1.7757e-05], [0.71478, 0.024121, 1.8236e-05],
[0.72173, 0.024852, 1.4944e-05], [0.7287, 0.025608, 2.0245e-06],
[0.73567, 0.02639, 1.5013e-07], [0.74266, 0.027199, 0 ],
[0.74964, 0.028038, 0 ], [0.75665, 0.028906, 0 ],
[0.76365, 0.029806, 0 ], [0.77068, 0.030743, 0 ],
[0.77771, 0.031711, 0 ], [0.78474, 0.032732, 0 ],
[0.79179, 0.033741, 0 ], [0.79886, 0.034936, 0 ],
[0.80593, 0.036031, 0 ], [0.81299, 0.03723, 0 ],
[0.82007, 0.038493, 0 ], [0.82715, 0.039819, 0 ],
[0.83423, 0.041236, 0 ], [0.84131, 0.042647, 0 ],
[0.84838, 0.044235, 0 ], [0.85545, 0.045857, 0 ],
[0.86252, 0.047645, 0 ], [0.86958, 0.049578, 0 ],
[0.87661, 0.051541, 0 ], [0.88365, 0.053735, 0 ],
[0.89064, 0.056168, 0 ], [0.89761, 0.058852, 0 ],
[0.90451, 0.061777, 0 ], [0.91131, 0.065281, 0 ],
[0.91796, 0.069448, 0 ], [0.92445, 0.074684, 0 ],
[0.93061, 0.08131, 0 ], [0.93648, 0.088878, 0 ],
[0.94205, 0.097336, 0 ], [0.9473, 0.10665, 0 ],
[0.9522, 0.1166, 0 ], [0.95674, 0.12716, 0 ],
[0.96094, 0.13824, 0 ], [0.96479, 0.14963, 0 ],
[0.96829, 0.16128, 0 ], [0.97147, 0.17303, 0 ],
[0.97436, 0.18489, 0 ], [0.97698, 0.19672, 0 ],
[0.97934, 0.20846, 0 ], [0.98148, 0.22013, 0 ],
[0.9834, 0.23167, 0 ], [0.98515, 0.24301, 0 ],
[0.98672, 0.25425, 0 ], [0.98815, 0.26525, 0 ],
[0.98944, 0.27614, 0 ], [0.99061, 0.28679, 0 ],
[0.99167, 0.29731, 0 ], [0.99263, 0.30764, 0 ],
[0.9935, 0.31781, 0 ], [0.99428, 0.3278, 0 ],
[0.995, 0.33764, 0 ], [0.99564, 0.34735, 0 ],
[0.99623, 0.35689, 0 ], [0.99675, 0.3663, 0 ],
[0.99722, 0.37556, 0 ], [0.99765, 0.38471, 0 ],
[0.99803, 0.39374, 0 ], [0.99836, 0.40265, 0 ],
[0.99866, 0.41145, 0 ], [0.99892, 0.42015, 0 ],
[0.99915, 0.42874, 0 ], [0.99935, 0.43724, 0 ],
[0.99952, 0.44563, 0 ], [0.99966, 0.45395, 0 ],
[0.99977, 0.46217, 0 ], [0.99986, 0.47032, 0 ],
[0.99993, 0.47838, 0 ], [0.99997, 0.48638, 0 ],
[1, 0.4943, 0 ], [1, 0.50214, 0 ],
[1, 0.50991, 1.2756e-05], [1, 0.51761, 4.5388e-05],
[1, 0.52523, 9.6977e-05], [1, 0.5328, 0.00016858],
[1, 0.54028, 0.0002582 ], [1, 0.54771, 0.00036528],
[1, 0.55508, 0.00049276], [1, 0.5624, 0.00063955],
[1, 0.56965, 0.00080443], [1, 0.57687, 0.00098902],
[1, 0.58402, 0.0011943 ], [1, 0.59113, 0.0014189 ],
[1, 0.59819, 0.0016626 ], [1, 0.60521, 0.0019281 ],
[1, 0.61219, 0.0022145 ], [1, 0.61914, 0.0025213 ],
[1, 0.62603, 0.0028496 ], [1, 0.6329, 0.0032006 ],
[1, 0.63972, 0.0035741 ], [1, 0.64651, 0.0039701 ],
[1, 0.65327, 0.0043898 ], [1, 0.66, 0.0048341 ],
[1, 0.66669, 0.005303 ], [1, 0.67336, 0.0057969 ],
[1, 0.67999, 0.006317 ], [1, 0.68661, 0.0068648 ],
[1, 0.69319, 0.0074406 ], [1, 0.69974, 0.0080433 ],
[1, 0.70628, 0.0086756 ], [1, 0.71278, 0.0093486 ],
[1, 0.71927, 0.010023 ], [1, 0.72573, 0.010724 ],
[1, 0.73217, 0.011565 ], [1, 0.73859, 0.012339 ],
[1, 0.74499, 0.01316 ], [1, 0.75137, 0.014042 ],
[1, 0.75772, 0.014955 ], [1, 0.76406, 0.015913 ],
[1, 0.77039, 0.016915 ], [1, 0.77669, 0.017964 ],
[1, 0.78298, 0.019062 ], [1, 0.78925, 0.020212 ],
[1, 0.7955, 0.021417 ], [1, 0.80174, 0.02268 ],
[1, 0.80797, 0.024005 ], [1, 0.81418, 0.025396 ],
[1, 0.82038, 0.026858 ], [1, 0.82656, 0.028394 ],
[1, 0.83273, 0.030013 ], [1, 0.83889, 0.031717 ],
[1, 0.84503, 0.03348 ], [1, 0.85116, 0.035488 ],
[1, 0.85728, 0.037452 ], [1, 0.8634, 0.039592 ],
[1, 0.86949, 0.041898 ], [1, 0.87557, 0.044392 ],
[1, 0.88165, 0.046958 ], [1, 0.88771, 0.04977 ],
[1, 0.89376, 0.052828 ], [1, 0.8998, 0.056209 ],
[1, 0.90584, 0.059919 ], [1, 0.91185, 0.063925 ],
[1, 0.91783, 0.068579 ], [1, 0.92384, 0.073948 ],
[1, 0.92981, 0.080899 ], [1, 0.93576, 0.090648 ],
[1, 0.94166, 0.10377 ], [1, 0.94752, 0.12051 ],
[1, 0.9533, 0.14149 ], [1, 0.959, 0.1672 ],
[1, 0.96456, 0.19823 ], [1, 0.96995, 0.23514 ],
[1, 0.9751, 0.2786 ], [1, 0.97992, 0.32883 ],
[1, 0.98432, 0.38571 ], [1, 0.9882, 0.44866 ],
[1, 0.9915, 0.51653 ], [1, 0.99417, 0.58754 ],
[1, 0.99625, 0.65985 ], [1, 0.99778, 0.73194 ],
[1, 0.99885, 0.80259 ], [1, 0.99953, 0.87115 ],
[1, 0.99989, 0.93683 ], [1, 1, 1 ]]
# Bokeh palette
# Pre-render the fire colormap above as a list of hex color strings
fire = [str('#{0:02x}{1:02x}{2:02x}'.format(int(r*255),int(g*255),int(b*255)))
        for r,g,b in fire_colors]
| 40.295349 | 111 | 0.582655 |
d45ed175d16c434c7a64393ee3241a7778c9dc6b | 418 | py | Python | assignment_5/eclipse.py | slchangtw/Advanced_Programming_in_Python_Jacobs | 46df6a77e23d92fc97c15b6112a3f428b2bbcb42 | [
"MIT"
] | null | null | null | assignment_5/eclipse.py | slchangtw/Advanced_Programming_in_Python_Jacobs | 46df6a77e23d92fc97c15b6112a3f428b2bbcb42 | [
"MIT"
] | null | null | null | assignment_5/eclipse.py | slchangtw/Advanced_Programming_in_Python_Jacobs | 46df6a77e23d92fc97c15b6112a3f428b2bbcb42 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# JTSK-350112
# a5_4.py
# Shun-Lung Chang
# sh.chang@jacobs-university.de
import datetime
if __name__ == '__main__':
    # Days elapsed since the last total solar eclipse (2017-08-21)
    last_eclipse = datetime.date(2017, 8, 21)
    elapsed = (datetime.date.today() - last_eclipse).days
    print("It has been {0} days since last total solar eclipse.".format(elapsed))
| 20.9 | 79 | 0.650718 |
7ba09212a24ff95d81c179d32d15cfd3c1e8afcd | 40,584 | py | Python | venv/Lib/site-packages/win32/test/test_win32file.py | dasxran/seleniumMachineLearning | 3098f836913a89847cb9e308189383a4ea981139 | [
"MIT"
] | 64 | 2020-07-22T06:24:18.000Z | 2022-03-27T10:48:15.000Z | venv/Lib/site-packages/win32/test/test_win32file.py | dasxran/seleniumMachineLearning | 3098f836913a89847cb9e308189383a4ea981139 | [
"MIT"
] | 9 | 2019-12-28T06:18:53.000Z | 2022-01-13T01:54:21.000Z | venv/Lib/site-packages/win32/test/test_win32file.py | dasxran/seleniumMachineLearning | 3098f836913a89847cb9e308189383a4ea981139 | [
"MIT"
] | 17 | 2020-09-14T02:46:41.000Z | 2022-03-01T09:52:33.000Z | import unittest
from pywin32_testutil import str2bytes, TestSkipped, testmain
import win32api, win32file, win32pipe, pywintypes, winerror, win32event
import win32con, ntsecuritycon
import sys
import os
import tempfile
import threading
import time
import shutil
import socket
import datetime
import random
import win32timezone
try:
set
except NameError:
from sets import Set as set
class TestReadBuffer(unittest.TestCase):
    # Tests that win32file.AllocateReadBuffer behaves like a mutable buffer.
    def testLen(self):
        # A buffer allocated with size 1 reports length 1
        buffer = win32file.AllocateReadBuffer(1)
        self.failUnlessEqual(len(buffer), 1)
    def testSimpleIndex(self):
        # Single-byte assignment and read-back round-trips
        val = str2bytes('\xFF')
        buffer = win32file.AllocateReadBuffer(1)
        buffer[0] = val
        self.failUnlessEqual(buffer[0], val)
    def testSimpleSlice(self):
        # Slice assignment and slice read-back round-trips
        buffer = win32file.AllocateReadBuffer(2)
        val = str2bytes('\0\0')
        buffer[:2] = val
        self.failUnlessEqual(buffer[0:2], val)
class TestSimpleOps(unittest.TestCase):
    def testSimpleFiles(self):
        # Round-trips a small payload through CreateFile/WriteFile/ReadFile.
        fd, filename = tempfile.mkstemp()
        os.close(fd)
        # Remove the file so CREATE_NEW below succeeds
        os.unlink(filename)
        handle = win32file.CreateFile(filename, win32file.GENERIC_WRITE, 0, None, win32con.CREATE_NEW, 0, None)
        # Embedded NUL verifies binary-safe writes
        test_data = str2bytes("Hello\0there")
        try:
            win32file.WriteFile(handle, test_data)
            handle.Close()
            # Try and open for read
            handle = win32file.CreateFile(filename, win32file.GENERIC_READ, 0, None, win32con.OPEN_EXISTING, 0, None)
            rc, data = win32file.ReadFile(handle, 1024)
            self.assertEquals(data, test_data)
        finally:
            handle.Close()
            try:
                os.unlink(filename)
            except os.error:
                pass
# A simple test using normal read/write operations.
def testMoreFiles(self):
# Create a file in the %TEMP% directory.
testName = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
desiredAccess = win32file.GENERIC_READ | win32file.GENERIC_WRITE
# Set a flag to delete the file automatically when it is closed.
fileFlags = win32file.FILE_FLAG_DELETE_ON_CLOSE
h = win32file.CreateFile( testName, desiredAccess, win32file.FILE_SHARE_READ, None, win32file.CREATE_ALWAYS, fileFlags, 0)
# Write a known number of bytes to the file.
data = str2bytes("z") * 1025
win32file.WriteFile(h, data)
self.failUnless(win32file.GetFileSize(h) == len(data), "WARNING: Written file does not have the same size as the length of the data in it!")
# Ensure we can read the data back.
win32file.SetFilePointer(h, 0, win32file.FILE_BEGIN)
hr, read_data = win32file.ReadFile(h, len(data)+10) # + 10 to get anything extra
self.failUnless(hr==0, "Readfile returned %d" % hr)
self.failUnless(read_data == data, "Read data is not what we wrote!")
# Now truncate the file at 1/2 its existing size.
newSize = len(data)//2
win32file.SetFilePointer(h, newSize, win32file.FILE_BEGIN)
win32file.SetEndOfFile(h)
self.failUnlessEqual(win32file.GetFileSize(h), newSize)
# GetFileAttributesEx/GetFileAttributesExW tests.
self.failUnlessEqual(win32file.GetFileAttributesEx(testName), win32file.GetFileAttributesExW(testName))
attr, ct, at, wt, size = win32file.GetFileAttributesEx(testName)
self.failUnless(size==newSize,
"Expected GetFileAttributesEx to return the same size as GetFileSize()")
self.failUnless(attr==win32file.GetFileAttributes(testName),
"Expected GetFileAttributesEx to return the same attributes as GetFileAttributes")
h = None # Close the file by removing the last reference to the handle!
self.failUnless(not os.path.isfile(testName), "After closing the file, it still exists!")
def testFilePointer(self):
# via [ 979270 ] SetFilePointer fails with negative offset
# Create a file in the %TEMP% directory.
filename = os.path.join( win32api.GetTempPath(), "win32filetest.dat" )
f = win32file.CreateFile(filename,
win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0,
None,
win32file.CREATE_ALWAYS,
win32file.FILE_ATTRIBUTE_NORMAL,
0)
try:
#Write some data
data = str2bytes('Some data')
(res, written) = win32file.WriteFile(f, data)
self.failIf(res)
self.assertEqual(written, len(data))
#Move at the beginning and read the data
win32file.SetFilePointer(f, 0, win32file.FILE_BEGIN)
(res, s) = win32file.ReadFile(f, len(data))
self.failIf(res)
self.assertEqual(s, data)
#Move at the end and read the data
win32file.SetFilePointer(f, -len(data), win32file.FILE_END)
(res, s) = win32file.ReadFile(f, len(data))
self.failIf(res)
self.failUnlessEqual(s, data)
finally:
f.Close()
os.unlink(filename)
def testFileTimesTimezones(self):
if not issubclass(pywintypes.TimeType, datetime.datetime):
# maybe should report 'skipped', but that's not quite right as
# there is nothing you can do to avoid it being skipped!
return
filename = tempfile.mktemp("-testFileTimes")
now_utc = win32timezone.utcnow()
now_local = now_utc.astimezone(win32timezone.TimeZoneInfo.local())
h = win32file.CreateFile(filename,
win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0, None, win32file.CREATE_ALWAYS, 0, 0)
try:
win32file.SetFileTime(h, now_utc, now_utc, now_utc)
ct, at, wt = win32file.GetFileTime(h)
self.failUnlessEqual(now_local, ct)
self.failUnlessEqual(now_local, at)
self.failUnlessEqual(now_local, wt)
# and the reverse - set local, check against utc
win32file.SetFileTime(h, now_local, now_local, now_local)
ct, at, wt = win32file.GetFileTime(h)
self.failUnlessEqual(now_utc, ct)
self.failUnlessEqual(now_utc, at)
self.failUnlessEqual(now_utc, wt)
finally:
h.close()
os.unlink(filename)
def testFileTimes(self):
if issubclass(pywintypes.TimeType, datetime.datetime):
from win32timezone import TimeZoneInfo
now = datetime.datetime.now(tz=TimeZoneInfo.local())
nowish = now + datetime.timedelta(seconds=1)
later = now + datetime.timedelta(seconds=120)
else:
rc, tzi = win32api.GetTimeZoneInformation()
bias = tzi[0]
if rc==2: # daylight-savings is in effect.
bias += tzi[-1]
bias *= 60 # minutes to seconds...
tick = int(time.time())
now = pywintypes.Time(tick+bias)
nowish = pywintypes.Time(tick+bias+1)
later = pywintypes.Time(tick+bias+120)
filename = tempfile.mktemp("-testFileTimes")
# Windows docs the 'last time' isn't valid until the last write
# handle is closed - so create the file, then re-open it to check.
open(filename,"w").close()
f = win32file.CreateFile(filename, win32file.GENERIC_READ|win32file.GENERIC_WRITE,
0, None,
win32con.OPEN_EXISTING, 0, None)
try:
ct, at, wt = win32file.GetFileTime(f)
self.failUnless(ct >= now, "File was created in the past - now=%s, created=%s" % (now, ct))
self.failUnless( now <= ct <= nowish, (now, ct))
self.failUnless(wt >= now, "File was written-to in the past now=%s, written=%s" % (now,wt))
self.failUnless( now <= wt <= nowish, (now, wt))
# Now set the times.
win32file.SetFileTime(f, later, later, later)
# Get them back.
ct, at, wt = win32file.GetFileTime(f)
# XXX - the builtin PyTime type appears to be out by a dst offset.
# just ignore that type here...
if issubclass(pywintypes.TimeType, datetime.datetime):
self.failUnlessEqual(ct, later)
self.failUnlessEqual(at, later)
self.failUnlessEqual(wt, later)
finally:
f.Close()
os.unlink(filename)
class TestGetFileInfoByHandleEx(unittest.TestCase):
    """GetFileInformationByHandleEx(FileBasicInfo) must agree with
    GetFileTime/GetFileAttributes for the same file."""
    __handle = __filename = None

    def setUp(self):
        # Create an empty temp file; we only need its name here, a handle
        # is opened (and remembered for tearDown) by the test itself.
        fd, self.__filename = tempfile.mkstemp()
        os.close(fd)

    def tearDown(self):
        if self.__handle is not None:
            self.__handle.Close()
        if self.__filename is not None:
            try:
                os.unlink(self.__filename)
            except OSError:
                pass
        self.__handle = self.__filename = None

    def testFileBasicInfo(self):
        attr = win32file.GetFileAttributes(self.__filename)
        handle = win32file.CreateFile(self.__filename, win32file.GENERIC_READ,
                                      0, None, win32con.OPEN_EXISTING, 0, None)
        self.__handle = handle
        ct, at, wt = win32file.GetFileTime(handle)
        # bug #752: this throws ERROR_BAD_LENGTH (24) in x86 binaries of build 221
        basic_info = win32file.GetFileInformationByHandleEx(handle, win32file.FileBasicInfo)
        for key, expected in (("CreationTime", ct),
                              ("LastAccessTime", at),
                              ("LastWriteTime", wt),
                              ("FileAttributes", attr)):
            self.assertEqual(expected, basic_info[key])
class TestOverlapped(unittest.TestCase):
    """Overlapped (asynchronous) I/O and IO completion port behaviour.

    Modernised: removed-in-3.12 TestCase aliases replaced, bare asserts
    replaced with assertIs (they vanish under ``python -O``), and the
    removed Thread.setDaemon/isAlive camelCase spellings updated.
    """

    def testSimpleOverlapped(self):
        # Create a file in the %TEMP% directory.
        import win32event
        testName = os.path.join(win32api.GetTempPath(), "win32filetest.dat")
        desiredAccess = win32file.GENERIC_WRITE
        overlapped = pywintypes.OVERLAPPED()
        evt = win32event.CreateEvent(None, 0, 0, None)
        overlapped.hEvent = evt
        # Create the file and write a large amount of data to it.
        h = win32file.CreateFile(testName, desiredAccess, 0, None, win32file.CREATE_ALWAYS, 0, 0)
        chunk_data = str2bytes("z") * 0x8000
        num_loops = 512
        for i in range(num_loops):
            win32file.WriteFile(h, chunk_data, overlapped)
            win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
            # Advance the file offset for the next overlapped write.
            overlapped.Offset = overlapped.Offset + len(chunk_data)
        h.Close()
        # Now read the data back overlapped
        overlapped = pywintypes.OVERLAPPED()
        evt = win32event.CreateEvent(None, 0, 0, None)
        overlapped.hEvent = evt
        desiredAccess = win32file.GENERIC_READ
        h = win32file.CreateFile(testName, desiredAccess, 0, None, win32file.OPEN_EXISTING, 0, 0)
        buffer = win32file.AllocateReadBuffer(0xFFFF)
        while 1:
            try:
                hr, data = win32file.ReadFile(h, buffer, overlapped)
                win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
                overlapped.Offset = overlapped.Offset + len(data)
                if data is not buffer:
                    self.fail("Unexpected result from ReadFile - should be the same buffer we passed it")
            except win32api.error:
                # Reading past EOF raises - that terminates the loop.
                break
        h.Close()

    def testCompletionPortsMultiple(self):
        # Mainly checking that we can "associate" an existing handle.  This
        # failed in build 203.
        ioport = win32file.CreateIoCompletionPort(win32file.INVALID_HANDLE_VALUE,
                                                  0, 0, 0)
        socks = []
        for PORT in range(9123, 9125):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(('', PORT))
            sock.listen(1)
            socks.append(sock)
            new = win32file.CreateIoCompletionPort(sock.fileno(), ioport, PORT, 0)
            # Associating with an existing port must hand back that port.
            self.assertIs(new, ioport)
        for s in socks:
            s.close()
        hv = int(ioport)
        ioport = new = None
        # The handle itself should be closed now (unless we leak references!)
        # Check that.
        try:
            win32file.CloseHandle(hv)
            raise RuntimeError("Expected close to fail!")
        except win32file.error as details:
            self.assertEqual(details.winerror, winerror.ERROR_INVALID_HANDLE)

    def testCompletionPortsQueued(self):
        class Foo: pass
        io_req_port = win32file.CreateIoCompletionPort(-1, None, 0, 0)
        overlapped = pywintypes.OVERLAPPED()
        overlapped.object = Foo()
        win32file.PostQueuedCompletionStatus(io_req_port, 0, 99, overlapped)
        # 'nbytes' rather than 'bytes' - don't shadow the builtin.
        errCode, nbytes, key, overlapped = \
            win32file.GetQueuedCompletionStatus(io_req_port, win32event.INFINITE)
        self.assertEqual(errCode, 0)
        self.assertIsInstance(overlapped.object, Foo)

    def _IOCPServerThread(self, handle, port, drop_overlapped_reference):
        overlapped = pywintypes.OVERLAPPED()
        win32pipe.ConnectNamedPipe(handle, overlapped)
        if drop_overlapped_reference:
            # Be naughty - the overlapped object is now dead, but
            # GetQueuedCompletionStatus will still find it.  Our check of
            # reference counting should catch that error.
            overlapped = None
            # even if we fail, be sure to close the handle; prevents hangs
            # on Vista 64...
            try:
                self.assertRaises(RuntimeError,
                                  win32file.GetQueuedCompletionStatus, port, -1)
            finally:
                handle.Close()
            return

        result = win32file.GetQueuedCompletionStatus(port, -1)
        ol2 = result[-1]
        self.assertIs(ol2, overlapped)
        data = win32file.ReadFile(handle, 512)[1]
        win32file.WriteFile(handle, data)

    def testCompletionPortsNonQueued(self, test_overlapped_death=0):
        # In 204 we had a reference count bug when OVERLAPPED objects were
        # associated with a completion port other than via
        # PostQueuedCompletionStatus.  This test is based on the reproduction
        # reported with that bug.
        # Create the pipe.
        BUFSIZE = 512
        pipe_name = r"\\.\pipe\pywin32_test_pipe"
        handle = win32pipe.CreateNamedPipe(pipe_name,
                                           win32pipe.PIPE_ACCESS_DUPLEX |
                                           win32file.FILE_FLAG_OVERLAPPED,
                                           win32pipe.PIPE_TYPE_MESSAGE |
                                           win32pipe.PIPE_READMODE_MESSAGE |
                                           win32pipe.PIPE_WAIT,
                                           1, BUFSIZE, BUFSIZE,
                                           win32pipe.NMPWAIT_WAIT_FOREVER,
                                           None)
        # Create an IOCP and associate it with the handle.
        port = win32file.CreateIoCompletionPort(-1, 0, 0, 0)
        win32file.CreateIoCompletionPort(handle, port, 1, 0)
        t = threading.Thread(target=self._IOCPServerThread,
                             args=(handle, port, test_overlapped_death))
        t.daemon = True  # avoid hanging entire test suite on failure.
        t.start()
        try:
            time.sleep(0.1)  # let thread do its thing.
            try:
                win32pipe.CallNamedPipe(r"\\.\pipe\pywin32_test_pipe", str2bytes("Hello there"), BUFSIZE, 0)
            except win32pipe.error:
                # Testing for overlapped death causes this
                if not test_overlapped_death:
                    raise
        finally:
            if not test_overlapped_death:
                handle.Close()
            t.join(3)
            self.assertFalse(t.is_alive(), "thread didn't finish")

    def testCompletionPortsNonQueuedBadReference(self):
        self.testCompletionPortsNonQueued(True)

    def testHashable(self):
        # OVERLAPPED objects must be usable as dict keys.
        overlapped = pywintypes.OVERLAPPED()
        d = {}
        d[overlapped] = "hello"
        self.assertEqual(d[overlapped], "hello")

    def testComparable(self):
        overlapped = pywintypes.OVERLAPPED()
        self.assertEqual(overlapped, overlapped)
        # ensure we explicitly test the operators.
        self.assertTrue(overlapped == overlapped)
        self.assertFalse(overlapped != overlapped)

    def testComparable2(self):
        # 2 overlapped objects compare equal if their contents are the same.
        overlapped1 = pywintypes.OVERLAPPED()
        overlapped2 = pywintypes.OVERLAPPED()
        self.assertEqual(overlapped1, overlapped2)
        # ensure we explicitly test the operators.
        self.assertTrue(overlapped1 == overlapped2)
        self.assertFalse(overlapped1 != overlapped2)
        # now change something in one of them - should no longer be equal.
        overlapped1.hEvent = 1
        self.assertNotEqual(overlapped1, overlapped2)
        # ensure we explicitly test the operators.
        self.assertFalse(overlapped1 == overlapped2)
        self.assertTrue(overlapped1 != overlapped2)
class TestSocketExtensions(unittest.TestCase):
    """AcceptEx / WSASend / WSARecv with overlapped buffers.

    Uses Event.is_set (the camelCase isSet alias was removed) and the
    supported TestCase method names.
    """

    def acceptWorker(self, port, running_event, stopped_event):
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.bind(('', port))
        listener.listen(200)
        # create accept socket
        accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # An overlapped
        overlapped = pywintypes.OVERLAPPED()
        overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
        # accept the connection.
        # We used to allow strings etc to be passed here, and they would be
        # modified!  Obviously this is evil :)
        buffer = " " * 1024  # EVIL - SHOULD NOT BE ALLOWED.
        self.assertRaises(TypeError, win32file.AcceptEx, listener, accepter, buffer, overlapped)
        # This is the correct way to allocate the buffer...
        buffer = win32file.AllocateReadBuffer(1024)
        rc = win32file.AcceptEx(listener, accepter, buffer, overlapped)
        self.assertEqual(rc, winerror.ERROR_IO_PENDING)
        # Set the event to say we are all ready
        running_event.set()
        # and wait for the connection.
        rc = win32event.WaitForSingleObject(overlapped.hEvent, 2000)
        if rc == win32event.WAIT_TIMEOUT:
            self.fail("timed out waiting for a connection")
        nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
        # fam, loc, rem = win32file.GetAcceptExSockaddrs(accepter, buffer)
        # Echo the received payload back to the client.
        accepter.send(buffer[:nbytes])
        # NOT set in a finally - this means *successfully* stopped!
        stopped_event.set()

    def testAcceptEx(self):
        port = 4680
        running = threading.Event()
        stopped = threading.Event()
        t = threading.Thread(target=self.acceptWorker, args=(port, running, stopped))
        t.start()
        running.wait(2)
        if not running.is_set():
            self.fail("AcceptEx Worker thread failed to start")
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(('127.0.0.1', port))
        win32file.WSASend(s, str2bytes("hello"), None)
        overlapped = pywintypes.OVERLAPPED()
        overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
        # Like above - WSARecv used to allow strings as the receive buffer!!
        buffer = " " * 10
        self.assertRaises(TypeError, win32file.WSARecv, s, buffer, overlapped)
        # This one should work :)
        buffer = win32file.AllocateReadBuffer(10)
        win32file.WSARecv(s, buffer, overlapped)
        nbytes = win32file.GetOverlappedResult(s.fileno(), overlapped, True)
        got = buffer[:nbytes]
        self.assertEqual(got, str2bytes("hello"))
        # thread should have stopped
        stopped.wait(2)
        if not stopped.is_set():
            self.fail("AcceptEx Worker thread failed to successfully stop")
class TestFindFiles(unittest.TestCase):
    """FindFilesW vs FindFilesIterator consistency and error handling."""

    def testIter(self):
        # 'spec'/'info' rather than 'dir'/'file' - don't shadow builtins.
        spec = os.path.join(os.getcwd(), "*")
        files = win32file.FindFilesW(spec)
        set1 = set()
        set1.update(files)
        set2 = set()
        for info in win32file.FindFilesIterator(spec):
            set2.add(info)
        # A real assertion method (the bare assert disappears under -O).
        self.assertGreater(len(set2), 5, "This directory has less than 5 files!?")
        self.assertEqual(set1, set2)

    def testBadDir(self):
        bad_spec = os.path.join(os.getcwd(), "a dir that doesnt exist", "*")
        self.assertRaises(win32file.error, win32file.FindFilesIterator, bad_spec)

    def testEmptySpec(self):
        spec = os.path.join(os.getcwd(), "*.foo_bar")
        num = 0
        for i in win32file.FindFilesIterator(spec):
            num += 1
        self.assertEqual(0, num)

    def testEmptyDir(self):
        test_path = os.path.join(win32api.GetTempPath(), "win32file_test_directory")
        try:
            # Note: previously used shutil.rmtree, but when looking for
            # reference count leaks, that function showed leaks!  os.rmdir
            # doesn't have that problem.
            os.rmdir(test_path)
        except os.error:
            pass
        os.mkdir(test_path)
        try:
            num = 0
            for i in win32file.FindFilesIterator(os.path.join(test_path, "*")):
                num += 1
            # Expecting "." and ".." only
            self.assertEqual(2, num)
        finally:
            os.rmdir(test_path)
class TestDirectoryChanges(unittest.TestCase):
    """ReadDirectoryChangesW via per-directory overlapped watcher threads."""
    num_test_dirs = 1

    def setUp(self):
        self.watcher_threads = []
        self.watcher_thread_changes = []
        self.dir_names = []
        self.dir_handles = []
        for i in range(self.num_test_dirs):
            td = tempfile.mktemp("-test-directory-changes-%d" % i)
            os.mkdir(td)
            self.dir_names.append(td)
            hdir = win32file.CreateFile(td,
                                        ntsecuritycon.FILE_LIST_DIRECTORY,
                                        win32con.FILE_SHARE_READ,
                                        None,  # security desc
                                        win32con.OPEN_EXISTING,
                                        win32con.FILE_FLAG_BACKUP_SEMANTICS |
                                        win32con.FILE_FLAG_OVERLAPPED,
                                        None)
            self.dir_handles.append(hdir)
            changes = []
            t = threading.Thread(target=self._watcherThreadOverlapped,
                                 args=(td, hdir, changes))
            t.start()
            self.watcher_threads.append(t)
            self.watcher_thread_changes.append(changes)

    def _watcherThread(self, dn, dh, changes):
        # A synchronous version:
        # XXX - not used - I was having a whole lot of problems trying to
        # get this to work.  Specifically:
        # * ReadDirectoryChangesW without an OVERLAPPED blocks infinitely.
        # * If another thread attempts to close the handle while
        #   ReadDirectoryChangesW is waiting on it, the ::CloseHandle() method
        #   blocks (which has nothing to do with the GIL - it is correctly
        #   managed)
        # Which ends up with no way to kill the thread!
        flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
        while 1:
            try:
                print("waiting", dh)
                new_changes = win32file.ReadDirectoryChangesW(dh,
                                                              8192,
                                                              False,  # sub-tree
                                                              flags)
                print("got", new_changes)
            except:
                raise
            # Bug fix: the result used to be bound to the name 'changes',
            # shadowing the accumulator argument - so the caller's list was
            # never updated and the result was extended with itself.
            changes.extend(new_changes)

    def _watcherThreadOverlapped(self, dn, dh, changes):
        flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
        buf = win32file.AllocateReadBuffer(8192)
        overlapped = pywintypes.OVERLAPPED()
        overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
        while 1:
            win32file.ReadDirectoryChangesW(dh,
                                            buf,
                                            False,  # sub-tree
                                            flags,
                                            overlapped)
            # Wait for our event, or for 5 seconds.
            rc = win32event.WaitForSingleObject(overlapped.hEvent, 5000)
            if rc == win32event.WAIT_OBJECT_0:
                # got some data!  Must use GetOverlappedResult to find out
                # how much is valid!  0 generally means the handle has
                # been closed.  Blocking is OK here, as the event has
                # already been set.
                nbytes = win32file.GetOverlappedResult(dh, overlapped, True)
                if nbytes:
                    bits = win32file.FILE_NOTIFY_INFORMATION(buf, nbytes)
                    changes.extend(bits)
                else:
                    # This is "normal" exit - our 'tearDown' closes the
                    # handle.
                    # print "looks like dir handle was closed!"
                    return
            else:
                print("ERROR: Watcher thread timed-out!")
                return  # kill the thread!

    def tearDown(self):
        # be careful about raising errors at teardown!
        for h in self.dir_handles:
            # See comments in _watcherThread above - this appears to
            # deadlock if a synchronous ReadDirectoryChangesW is waiting...
            # (No such problems with an asynch ReadDirectoryChangesW)
            h.Close()
        for dn in self.dir_names:
            try:
                shutil.rmtree(dn)
            except OSError:
                print("FAILED to remove directory", dn)
        for t in self.watcher_threads:
            # closing dir handle should have killed threads!
            t.join(5)
            if t.is_alive():
                print("FAILED to wait for thread termination")

    def stablize(self):
        # (typo kept - renaming would change the class's method surface)
        # Give the watcher thread time to observe the change.
        time.sleep(0.5)

    def testSimple(self):
        self.stablize()
        for dn in self.dir_names:
            fn = os.path.join(dn, "test_file")
            open(fn, "w").close()
        self.stablize()
        changes = self.watcher_thread_changes[0]
        self.assertEqual(changes, [(1, "test_file")])

    def testSmall(self):
        self.stablize()
        for dn in self.dir_names:
            fn = os.path.join(dn, "x")
            open(fn, "w").close()
        self.stablize()
        changes = self.watcher_thread_changes[0]
        self.assertEqual(changes, [(1, "x")])
class TestEncrypt(unittest.TestCase):
    """EncryptFile/DecryptFile round-trip, tolerated on non-NTFS volumes."""

    def testEncrypt(self):
        fname = tempfile.mktemp("win32file_test")
        # The original closed the handle and set it to None before the
        # finally block; a context manager expresses the same lifetime.
        with open(fname, "wb") as out:
            out.write(str2bytes("hello"))
        try:
            try:
                win32file.EncryptFile(fname)
            except win32file.error as details:
                # Only ACCESS_DENIED (non-NTFS volume) is tolerated.
                if details.winerror != winerror.ERROR_ACCESS_DENIED:
                    raise
                print("It appears this is not NTFS - cant encrypt/decrypt")
            win32file.DecryptFile(fname)
        finally:
            os.unlink(fname)
class TestConnect(unittest.TestCase):
    """ConnectEx round trips against an AcceptEx worker, with and without
    an initial payload.  Uses Thread.is_alive (isAlive was removed in
    Python 3.9) and assertFalse instead of the removed failIf alias."""

    def connect_thread_runner(self, expect_payload, giveup_event):
        # As Windows 2000 doesn't do ConnectEx, we need to use a non-blocking
        # accept, as our test connection may never come.  May as well use
        # AcceptEx for this...
        listener = socket.socket()
        self.addr = ('localhost', random.randint(10000, 64000))
        listener.bind(self.addr)
        listener.listen(1)
        # create accept socket
        accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # An overlapped
        overlapped = pywintypes.OVERLAPPED()
        overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
        # accept the connection.
        if expect_payload:
            buf_size = 1024
        else:
            # when we don't expect data we must be careful to only pass the
            # exact number of bytes for the endpoint data...
            buf_size = win32file.CalculateSocketEndPointSize(listener)
        buffer = win32file.AllocateReadBuffer(buf_size)
        win32file.AcceptEx(listener, accepter, buffer, overlapped)
        # wait for the connection or our test to fail.
        events = giveup_event, overlapped.hEvent
        rc = win32event.WaitForMultipleObjects(events, False, 2000)
        if rc == win32event.WAIT_TIMEOUT:
            self.fail("timed out waiting for a connection")
        if rc == win32event.WAIT_OBJECT_0:
            # Our main thread running the test failed and will never connect.
            return
        # must be a connection.
        nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
        if expect_payload:
            self.request = buffer[:nbytes]
        accepter.send(str2bytes('some expected response'))

    def test_connect_with_payload(self):
        giveup_event = win32event.CreateEvent(None, 0, 0, None)
        t = threading.Thread(target=self.connect_thread_runner,
                             args=(True, giveup_event))
        t.start()
        time.sleep(0.1)
        s2 = socket.socket()
        ol = pywintypes.OVERLAPPED()
        s2.bind(('0.0.0.0', 0))  # connectex requires the socket be bound beforehand
        try:
            win32file.ConnectEx(s2, self.addr, ol, str2bytes("some expected request"))
        except win32file.error as exc:
            win32event.SetEvent(giveup_event)
            if exc.winerror == 10022:  # WSAEINVAL
                raise TestSkipped("ConnectEx is not available on this platform")
            raise  # some error we don't expect.
        win32file.GetOverlappedResult(s2.fileno(), ol, 1)
        ol = pywintypes.OVERLAPPED()
        buff = win32file.AllocateReadBuffer(1024)
        win32file.WSARecv(s2, buff, ol, 0)
        length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
        self.response = buff[:length]
        self.assertEqual(self.response, str2bytes('some expected response'))
        self.assertEqual(self.request, str2bytes('some expected request'))
        t.join(5)
        self.assertFalse(t.is_alive(), "worker thread didn't terminate")

    def test_connect_without_payload(self):
        giveup_event = win32event.CreateEvent(None, 0, 0, None)
        t = threading.Thread(target=self.connect_thread_runner,
                             args=(False, giveup_event))
        t.start()
        time.sleep(0.1)
        s2 = socket.socket()
        ol = pywintypes.OVERLAPPED()
        s2.bind(('0.0.0.0', 0))  # connectex requires the socket be bound beforehand
        try:
            win32file.ConnectEx(s2, self.addr, ol)
        except win32file.error as exc:
            win32event.SetEvent(giveup_event)
            if exc.winerror == 10022:  # WSAEINVAL
                raise TestSkipped("ConnectEx is not available on this platform")
            raise  # some error we don't expect.
        win32file.GetOverlappedResult(s2.fileno(), ol, 1)
        ol = pywintypes.OVERLAPPED()
        buff = win32file.AllocateReadBuffer(1024)
        win32file.WSARecv(s2, buff, ol, 0)
        length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
        self.response = buff[:length]
        self.assertEqual(self.response, str2bytes('some expected response'))
        t.join(5)
        self.assertFalse(t.is_alive(), "worker thread didn't terminate")
class TestTransmit(unittest.TestCase):
    """TransmitFile with every combination of head/tail buffers."""

    def test_transmit(self):
        import binascii
        # 'raw' rather than 'bytes' - don't shadow the builtin type.
        raw = os.urandom(1024 * 1024)
        val = binascii.hexlify(raw)
        val_length = len(val)
        f = tempfile.TemporaryFile()
        f.write(val)

        def runner():
            # Accept one connection and collect everything sent.
            s1 = socket.socket()
            self.addr = ('localhost', random.randint(10000, 64000))
            s1.bind(self.addr)
            s1.listen(1)
            cli, addr = s1.accept()
            buf = 1
            self.request = []
            while buf:
                buf = cli.recv(1024 * 100)
                self.request.append(buf)

        th = threading.Thread(target=runner)
        th.start()
        time.sleep(0.5)
        s2 = socket.socket()
        s2.connect(self.addr)
        length = 0
        aaa = str2bytes("[AAA]")
        bbb = str2bytes("[BBB]")
        ccc = str2bytes("[CCC]")
        ddd = str2bytes("[DDD]")
        empty = str2bytes("")
        # 1: no head/tail buffers.
        ol = pywintypes.OVERLAPPED()
        f.seek(0)
        win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0)
        length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
        # 2: both head and tail.
        ol = pywintypes.OVERLAPPED()
        f.seek(0)
        win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, aaa, bbb)
        length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
        # 3: empty head and tail.
        ol = pywintypes.OVERLAPPED()
        f.seek(0)
        win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, empty, empty)
        length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
        # 4: tail only.
        ol = pywintypes.OVERLAPPED()
        f.seek(0)
        win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, None, ccc)
        length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
        # 5: head only.
        ol = pywintypes.OVERLAPPED()
        f.seek(0)
        win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()), val_length, 0, ol, 0, ddd)
        length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)
        s2.close()
        th.join()
        buf = str2bytes('').join(self.request)
        self.assertEqual(length, len(buf))
        expected = val + aaa + val + bbb + val + val + ccc + ddd + val
        self.assertEqual(type(expected), type(buf))
        # assertTrue rather than the removed assert_ alias.
        self.assertTrue(expected == buf)
class TestWSAEnumNetworkEvents(unittest.TestCase):
    """WSAEventSelect / WSAEnumNetworkEvents argument checking and a full
    non-blocking connect/send/recv/close event walk-through.

    assertEquals (removed in Python 3.12) replaced by assertEqual, and
    exception variables renamed so they no longer shadow the event handle
    'e' (the except clause deletes its target on exit in Python 3).
    """

    def test_basics(self):
        s = socket.socket()
        e = win32event.CreateEvent(None, 1, 0, None)
        win32file.WSAEventSelect(s, e, 0)
        self.assertEqual(win32file.WSAEnumNetworkEvents(s), {})
        self.assertEqual(win32file.WSAEnumNetworkEvents(s, e), {})
        self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, e, 3)
        self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, "spam")
        self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam", e)
        self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam")
        f = open("NUL")
        h = win32file._get_osfhandle(f.fileno())
        self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, h)
        self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, s, h)
        try:
            win32file.WSAEnumNetworkEvents(h)
        except win32file.error as exc:
            self.assertEqual(exc.winerror, win32file.WSAENOTSOCK)
        try:
            win32file.WSAEnumNetworkEvents(s, h)
        except win32file.error as exc:
            # According to the docs it would seem reasonable that
            # this would fail with WSAEINVAL, but it doesn't.
            self.assertEqual(exc.winerror, win32file.WSAENOTSOCK)

    def test_functional(self):
        # This is not really a unit test, but it does exercise the code
        # quite well and can serve as an example of WSAEventSelect and
        # WSAEnumNetworkEvents usage.
        port = socket.socket()
        port.setblocking(0)
        port_event = win32event.CreateEvent(None, 0, 0, None)
        win32file.WSAEventSelect(port, port_event,
                                 win32file.FD_ACCEPT |
                                 win32file.FD_CLOSE)
        port.bind(("127.0.0.1", 0))
        port.listen(10)
        client = socket.socket()
        client.setblocking(0)
        client_event = win32event.CreateEvent(None, 0, 0, None)
        win32file.WSAEventSelect(client, client_event,
                                 win32file.FD_CONNECT |
                                 win32file.FD_READ |
                                 win32file.FD_WRITE |
                                 win32file.FD_CLOSE)
        err = client.connect_ex(port.getsockname())
        self.assertEqual(err, win32file.WSAEWOULDBLOCK)
        res = win32event.WaitForSingleObject(port_event, 1000)
        self.assertEqual(res, win32event.WAIT_OBJECT_0)
        events = win32file.WSAEnumNetworkEvents(port, port_event)
        self.assertEqual(events, {win32file.FD_ACCEPT: 0})
        server, addr = port.accept()
        server.setblocking(0)
        server_event = win32event.CreateEvent(None, 1, 0, None)
        win32file.WSAEventSelect(server, server_event,
                                 win32file.FD_READ |
                                 win32file.FD_WRITE |
                                 win32file.FD_CLOSE)
        res = win32event.WaitForSingleObject(server_event, 1000)
        self.assertEqual(res, win32event.WAIT_OBJECT_0)
        events = win32file.WSAEnumNetworkEvents(server, server_event)
        self.assertEqual(events, {win32file.FD_WRITE: 0})
        res = win32event.WaitForSingleObject(client_event, 1000)
        self.assertEqual(res, win32event.WAIT_OBJECT_0)
        events = win32file.WSAEnumNetworkEvents(client, client_event)
        self.assertEqual(events, {win32file.FD_CONNECT: 0,
                                  win32file.FD_WRITE: 0})
        # Fill the socket's send buffer to provoke WSAEWOULDBLOCK.
        sent = 0
        data = str2bytes("x") * 16 * 1024
        while sent < 16 * 1024 * 1024:
            try:
                sent += client.send(data)
            except socket.error as exc:
                if exc.args[0] == win32file.WSAEINTR:
                    continue
                elif exc.args[0] in (win32file.WSAEWOULDBLOCK, win32file.WSAENOBUFS):
                    break
                else:
                    raise
        else:
            self.fail("could not find socket buffer limit")
        events = win32file.WSAEnumNetworkEvents(client)
        self.assertEqual(events, {})
        res = win32event.WaitForSingleObject(server_event, 1000)
        self.assertEqual(res, win32event.WAIT_OBJECT_0)
        events = win32file.WSAEnumNetworkEvents(server, server_event)
        self.assertEqual(events, {win32file.FD_READ: 0})
        received = 0
        while received < sent:
            try:
                received += len(server.recv(16 * 1024))
            except socket.error as exc:
                if exc.args[0] in [win32file.WSAEINTR, win32file.WSAEWOULDBLOCK]:
                    continue
                else:
                    raise
        self.assertEqual(received, sent)
        events = win32file.WSAEnumNetworkEvents(server)
        self.assertEqual(events, {})
        res = win32event.WaitForSingleObject(client_event, 1000)
        self.assertEqual(res, win32event.WAIT_OBJECT_0)
        events = win32file.WSAEnumNetworkEvents(client, client_event)
        self.assertEqual(events, {win32file.FD_WRITE: 0})
        client.shutdown(socket.SHUT_WR)
        res = win32event.WaitForSingleObject(server_event, 1000)
        self.assertEqual(res, win32event.WAIT_OBJECT_0)
        # strange timing issues...
        for i in range(5):
            events = win32file.WSAEnumNetworkEvents(server, server_event)
            if events:
                break
            win32api.Sleep(100)
        else:
            raise AssertionError("failed to get events")
        self.assertEqual(events, {win32file.FD_CLOSE: 0})
        events = win32file.WSAEnumNetworkEvents(client)
        self.assertEqual(events, {})
        server.close()
        res = win32event.WaitForSingleObject(client_event, 1000)
        self.assertEqual(res, win32event.WAIT_OBJECT_0)
        events = win32file.WSAEnumNetworkEvents(client, client_event)
        self.assertEqual(events, {win32file.FD_CLOSE: 0})
        client.close()
        events = win32file.WSAEnumNetworkEvents(port)
        self.assertEqual(events, {})
if __name__ == '__main__':
    # Run the suite via pywin32's test helper (imported from
    # pywin32_testutil at the top of the file).
    testmain()
| 42.407524 | 148 | 0.598339 |
6f1fed352744dad8240466872e2dae8b0272c69c | 819 | py | Python | stats.py | jivitesh-sharma/Drop-Clause-Interpretable-TM | 4fb4d4be0f24a0c30f13fbcca974390889d7fe84 | [
"MIT"
] | 1 | 2022-01-28T18:01:34.000Z | 2022-01-28T18:01:34.000Z | stats.py | jivitesh-sharma/Drop-Clause-Interpretable-TM | 4fb4d4be0f24a0c30f13fbcca974390889d7fe84 | [
"MIT"
] | 1 | 2021-09-22T17:39:16.000Z | 2021-09-22T17:39:16.000Z | stats.py | jivitesh-sharma/Drop-Clause-Interpretable-TM | 4fb4d4be0f24a0c30f13fbcca974390889d7fe84 | [
"MIT"
] | 2 | 2021-05-26T07:45:29.000Z | 2021-05-26T13:05:25.000Z | import numpy as np
import sys
# Usage: stats.py <data-file> <start>
# Prints column-wise statistics for rows whose second column lies in
# [start, start+25), then a per-id minimum-of-maxima summary.
m = np.loadtxt(sys.argv[1])
start = int(sys.argv[2])
#length = int(sys.argv[3])
# Select the window once instead of re-filtering for every statistic
# (the original recomputed the same boolean mask six times).
window = m[np.logical_and(m[:, 1] >= start, m[:, 1] < start + 25), :]
print("MIN", window.min(axis=0))
print("MEAN", window.mean(axis=0))
print("MEDIAN", np.median(window, axis=0))
print("STD", window.std(axis=0))
print("95%", np.percentile(window, 95, axis=0))
print("MAX", window.max(axis=0))
ids = m[:, 0]
data = m[:, 1:]
# Per-id column-wise maxima: sort rows by id, then reduce over each run
# of equal ids (reduceat over the start offset of every group).
_ndx = np.argsort(ids)
_id, _pos = np.unique(ids[_ndx], return_index=True)
g_max = np.maximum.reduceat(data[_ndx], _pos)
print("MIN-MAX:", g_max.min(axis=0))
| 39 | 96 | 0.603175 |
41c11e2b952473d80c398211bc2b8a11806b5d3c | 755 | py | Python | cloud.py | ricoen/IoT-monitoring | 731d549c59cd53fd89e3be1ae5d6434c1c0fc281 | [
"MIT"
] | null | null | null | cloud.py | ricoen/IoT-monitoring | 731d549c59cd53fd89e3be1ae5d6434c1c0fc281 | [
"MIT"
] | null | null | null | cloud.py | ricoen/IoT-monitoring | 731d549c59cd53fd89e3be1ae5d6434c1c0fc281 | [
"MIT"
] | null | null | null | import psutil
import influxdb_client
from influxdb_client.client.write_api import SYNCHRONOUS
from time import sleep
bucket = "<my-bucket>"
org = "<my-org>"
token = "<my-token>"
url = "https://ap-southeast-2-1.aws.cloud2.influxdata.com"
client = influxdb_client.InfluxDBClient(
token=token,
org=org,
url=url
)
write_api = client.write_api(write_options=SYNCHRONOUS)
temp = []
for i in range(10):
cpuPercent = psutil.cpu_percent(interval=5)
temp.append(cpuPercent)
print(temp[i])
point = influxdb_client.Point("measured").tag("computer", "Pop_OS!").field("cpu_percentage", temp[i])
write_api.write(bucket=bucket, org=org, record=point)
sleep(5)
print('Data: [%s]' % ', '.join(map(str, temp)))
| 21.571429 | 105 | 0.682119 |
de2f362d21c8360f3fb283485c5fa9d648bd3388 | 1,728 | py | Python | app/user/serializers.py | howard5248/django-restAPI-1 | 988f9e4efe3f63481323e242f0b96c215ac440e5 | [
"MIT"
] | null | null | null | app/user/serializers.py | howard5248/django-restAPI-1 | 988f9e4efe3f63481323e242f0b96c215ac440e5 | [
"MIT"
] | null | null | null | app/user/serializers.py | howard5248/django-restAPI-1 | 988f9e4efe3f63481323e242f0b96c215ac440e5 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """Serializer for the users object."""

    class Meta:
        model = get_user_model()
        fields = ('email', 'password', 'name')
        # Never echo the raw password back in responses; enforce a minimum
        # length on input.
        extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}

    def create(self, validated_data):
        """Create a new user with encrypted password and return it."""
        # create_user() hashes the password (unlike objects.create()).
        return get_user_model().objects.create_user(**validated_data)

    def update(self, instance, validated_data):
        """Update a user, setting the password correctly, and return it."""
        # Pop the password so ModelSerializer doesn't write it as a plain
        # field; hash it explicitly below instead.
        password = validated_data.pop('password', None)
        user = super().update(instance, validated_data)

        if password:
            user.set_password(password)
            user.save()

        return user
class AuthTokenSerializer(serializers.Serializer):
    """Serializer for the user authentication object."""
    email = serializers.CharField()
    password = serializers.CharField(
        style={'input_type': 'password'},
        # Passwords may legitimately contain leading/trailing spaces.
        trim_whitespace=False
    )

    def validate(self, attrs):
        """Validate and authenticate the user.

        Raises serializers.ValidationError when the credentials do not
        match an active user.
        """
        email = attrs.get('email')
        password = attrs.get('password')
        # The custom user model uses email as the USERNAME_FIELD, hence
        # username=email here.
        user = authenticate(
            request=self.context.get('request'),
            username=email,
            password=password
        )
        if not user:
            msg = _('Unable to authenticate with provided credentials')
            raise serializers.ValidationError(msg, code='authentication')

        # Downstream views (e.g. ObtainAuthToken) read the authenticated
        # user from attrs['user'].
        attrs['user'] = user
        return attrs
| 31.418182 | 74 | 0.646412 |
68f3c444e0ca269cecd365e1d90b6f242019d544 | 7,012 | py | Python | custom_conf/etc/calamares/src/modules/rawfs/main.py | ix-os/IXOS | 840abf7e022f46073d898fed5adb667bb5cb7166 | [
"CC0-1.0"
] | null | null | null | custom_conf/etc/calamares/src/modules/rawfs/main.py | ix-os/IXOS | 840abf7e022f46073d898fed5adb667bb5cb7166 | [
"CC0-1.0"
] | 13 | 2020-07-30T19:55:36.000Z | 2020-12-07T16:57:23.000Z | custom_conf/etc/calamares/src/modules/rawfs/main.py | ix-os/IXOS | 840abf7e022f46073d898fed5adb667bb5cb7166 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# === This file is part of Calamares - <https://github.com/calamares> ===
#
# Copyright 2019, Collabora Ltd <arnaud.ferraris@collabora.com>
#
# Calamares is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Calamares is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Calamares. If not, see <http://www.gnu.org/licenses/>.
import libcalamares
import os
import stat
import subprocess
from time import gmtime, strftime, sleep
from math import gcd
import gettext
# Bind `_` to the calamares-python translation catalog for this job's
# locale; fall back to the untranslated string when no catalog is found.
_ = gettext.translation("calamares-python",
                        localedir=libcalamares.utils.gettext_path(),
                        languages=libcalamares.utils.gettext_languages(),
                        fallback=True).gettext
def pretty_name():
    # Progress-page label for this job (shown translated in the UI).
    return _("Installing data.")
def lcm(a, b):
    """
    Computes the Least Common Multiple of 2 numbers.

    Uses floor division so the result is an exact int (the original used
    true division, which returned a float and could lose precision for
    large block sizes).
    """
    return a * b // gcd(a, b)
def get_device_size(device):
    """
    Returns a filesystem's total size and block size in bytes.

    For block devices, block size is the device's block size.
    For other files (fs images), block size is 1 byte.

    @param device: str
        Absolute path to the device or filesystem image.

    @return: tuple(int, int)
        The filesystem's size and its block size.
    """
    mode = os.stat(device).st_mode
    if stat.S_ISBLK(mode):
        basedevice = ""
        partition = os.path.basename(device)
        # Find the parent block device by repeatedly trimming the partition
        # name (e.g. "sda1" -> "sda") until /sys/block/<name> exists.
        tmp = partition
        while len(tmp) > 0:
            tmp = tmp[:-1]
            if os.path.exists("/sys/block/" + tmp):
                basedevice = tmp
                break

        # Get device block size.  Fixed: use context managers so the sysfs
        # handles are closed even if parsing raises.
        with open("/sys/block/" + basedevice + "/queue/hw_sector_size") as file:
            blocksize = int(file.readline())

        # Get partition size (sysfs reports it in sectors).
        with open("/sys/block/" + basedevice + "/" + partition + "/size") as file:
            size = int(file.readline()) * blocksize
    else:
        size = os.path.getsize(device)
        blocksize = 1

    return size, blocksize
class RawFSLowSpaceError(Exception):
    """Raised when the destination partition is smaller than the source."""
    pass
class RawFSItem:
    """One source filesystem (or image) to copy raw onto a destination device."""
    __slots__ = ['source', 'destination', 'filesystem', 'resize']

    def copy(self, current=0, total=1):
        """
        Copies a raw filesystem on a disk partition, and grow it to the full
        destination partition's size if required.

        @param current: int
            The index of the current item in the filesystems list
            (used for progress reporting)
        @param total: int
            The number of items in the filesystems list
            (used for progress reporting)
        """
        count = 0
        libcalamares.utils.debug("Copying {} to {}".format(self.source, self.destination))
        # "bogus" mode lets the module be exercised without touching disks.
        if libcalamares.job.configuration.get("bogus", False):
            return

        srcsize, srcblksize = get_device_size(self.source)
        destsize, destblksize = get_device_size(self.destination)
        if destsize < srcsize:
            # Fixed: the original had an unreachable `return` after this raise.
            raise RawFSLowSpaceError

        # Compute transfer block size (100x the LCM of the block sizes seems a good fit)
        blksize = int(100 * lcm(srcblksize, destblksize))

        # Execute copy.  Fixed: context managers close both handles even if a
        # read/write raises (the original leaked them on error).
        with open(self.source, "rb") as src, open(self.destination, "wb") as dest:
            buffer = src.read(blksize)
            while len(buffer) > 0:
                dest.write(buffer)
                count += len(buffer)
                # Compute job progress across all items in the list.
                progress = ((count / srcsize) + current) / total
                libcalamares.job.setprogress(progress)
                # Read next data block
                buffer = src.read(blksize)

        if self.resize and "ext" in self.filesystem:
            # Only ext2/3/4 can be grown here; e2fsck first, as resize2fs
            # refuses to touch an unchecked filesystem.
            libcalamares.utils.debug("Resizing filesystem on {}".format(self.destination))
            subprocess.run(["e2fsck", "-f", "-y", self.destination])
            subprocess.run(["resize2fs", self.destination])

    def __init__(self, config, device, fs):
        """Build an item from a `targets` config entry and its partition."""
        libcalamares.utils.debug("Adding an entry for raw copy of {} to {}".format(
            config["source"], device))
        self.source = os.path.realpath(config["source"])
        # If source is a mount point, look for the actual device mounted on it.
        # Fixed: close /proc/mounts (the original leaked the handle) and
        # split each line only once.
        if os.path.ismount(self.source):
            with open("/proc/mounts", "r") as procmounts:
                for line in procmounts:
                    fields = line.split()
                    if self.source in fields:
                        self.source = fields[0]
                        break
        self.destination = device
        self.filesystem = fs
        # dict.get replaces the original try/except KeyError.
        self.resize = bool(config.get("resize", False))
def update_global_storage(item, gs):
    # After the raw copy, the destination filesystem carries the UUID of the
    # source image; refresh the matching entry in the partition list so later
    # modules (fstab, bootloader) see the correct values.
    for partition in gs:
        if partition["device"] == item.destination:
            ret = subprocess.run(["blkid", "-s", "UUID", "-o", "value", item.destination],
                                 stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
            if ret.returncode == 0:
                libcalamares.utils.debug("Setting {} UUID to {}".format(item.destination,
                                                                        ret.stdout.rstrip()))
                gs[gs.index(partition)]["uuid"] = ret.stdout.rstrip()
            # Record where this partition's contents came from.
            gs[gs.index(partition)]["source"] = item.source
    # Replace the stored list wholesale so the updates are persisted.
    libcalamares.globalstorage.remove("partitions")
    libcalamares.globalstorage.insert("partitions", gs)
def run():
    """Raw filesystem copy module.

    Entry point called by Calamares.  Returns None on success, or a
    (title, message) tuple describing the failure.
    """
    filesystems = list()
    partitions = libcalamares.globalstorage.value("partitions")
    if not partitions:
        libcalamares.utils.warning("partitions is empty, {!s}".format(partitions))
        return (_("Configuration Error"),
                _("No partitions are defined for <pre>{!s}</pre> to use." ).format("rawfs"))

    libcalamares.utils.debug("Copying {!s} raw partitions.".format(len(partitions)))
    # Match each mounted partition against the module's configured `targets`
    # by mount point; only matched pairs are copied.
    for partition in partitions:
        if partition["mountPoint"]:
            for src in libcalamares.job.configuration["targets"]:
                if src["mountPoint"] == partition["mountPoint"]:
                    filesystems.append(RawFSItem(src, partition["device"], partition["fs"]))

    for item in filesystems:
        try:
            # index/len drive the overall progress fraction.
            item.copy(filesystems.index(item), len(filesystems))
        except RawFSLowSpaceError:
            return ("Not enough free space",
                    "{} partition is too small to copy {} on it".format(item.destination, item.source))
        update_global_storage(item, partitions)
    return None
| 36.520833 | 99 | 0.6081 |
d49263d65535f41a4e1c076a3d0e4ebc7b140829 | 4,254 | py | Python | main.py | nimashoghi/ocp | 9c8d2eafa0cae80f0a9ae76aa563c4bf84887682 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | main.py | nimashoghi/ocp | 9c8d2eafa0cae80f0a9ae76aa563c4bf84887682 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | main.py | nimashoghi/ocp | 9c8d2eafa0cae80f0a9ae76aa563c4bf84887682 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import copy
import logging
import os
import sys
import time
from pathlib import Path
import submitit
from ocpmodels.common import distutils
from ocpmodels.common.flags import flags
from ocpmodels.common.registry import registry
from ocpmodels.common.utils import (
build_config,
create_grid,
save_experiment_log,
setup_imports,
setup_logging,
)
class Runner(submitit.helpers.Checkpointable):
    """Builds an OCP trainer + task from a config dict and runs it.

    Subclassing submitit.helpers.Checkpointable makes instances picklable
    and lets submitit requeue the job after preemption via checkpoint().
    """

    def __init__(self):
        # Stored on call so checkpoint() can resubmit with the same config.
        self.config = None

    def __call__(self, config):
        setup_logging()
        self.config = copy.deepcopy(config)

        # NOTE(review): reads the module-level `args` parsed in __main__;
        # this class is only usable from this script's entry point.
        if args.distributed:
            distutils.setup(config)

        try:
            setup_imports()
            # Resolve the trainer class from the registry ("simple" default)
            # and instantiate it from the config sections.
            self.trainer = registry.get_trainer_class(
                config.get("trainer", "simple")
            )(
                task=config["task"],
                model=config["model"],
                dataset=config["dataset"],
                optimizer=config["optim"],
                identifier=config["identifier"],
                timestamp_id=config.get("timestamp_id", None),
                run_dir=config.get("run_dir", "./"),
                is_debug=config.get("is_debug", False),
                is_vis=config.get("is_vis", False),
                print_every=config.get("print_every", 10),
                seed=config.get("seed", 0),
                logger=config.get("logger", "tensorboard"),
                local_rank=config["local_rank"],
                amp=config.get("amp", False),
                cpu=config.get("cpu", False),
                slurm=config.get("slurm", {}),
            )
            self.task = registry.get_task_class(config["mode"])(self.config)
            self.task.setup(self.trainer)
            start_time = time.time()
            self.task.run()

            # Wait for all ranks before reporting wall-clock time once.
            distutils.synchronize()
            if distutils.is_master():
                logging.info(f"Total time taken: {time.time() - start_time}")
        finally:
            # Always tear down the process group, even if the run raised.
            if args.distributed:
                distutils.cleanup()

    def checkpoint(self, *args, **kwargs):
        # Called by submitit on preemption/timeout: persist trainer state and
        # resubmit a fresh Runner that resumes from the saved checkpoint.
        new_runner = Runner()
        self.trainer.save(checkpoint_file="checkpoint.pt", training_state=True)
        self.config["checkpoint"] = self.task.chkpt_path
        self.config["timestamp_id"] = self.trainer.timestamp_id
        if self.trainer.logger is not None:
            # Tell the experiment logger this run will be requeued.
            self.trainer.logger.mark_preempting()
        return submitit.helpers.DelayedSubmission(new_runner, self.config)
if __name__ == "__main__":
    setup_logging()

    parser = flags.get_parser()
    args, override_args = parser.parse_known_args()
    config = build_config(args, override_args)

    if args.submit:  # Run on cluster
        slurm_add_params = config.get(
            "slurm", None
        )  # additional slurm arguments
        if args.sweep_yml:  # Run grid search
            # One config per point of the sweep grid.
            configs = create_grid(config, args.sweep_yml)
        else:
            configs = [config]

        logging.info(f"Submitting {len(configs)} jobs")
        executor = submitit.AutoExecutor(
            folder=args.logdir / "%j", slurm_max_num_timeout=3
        )
        executor.update_parameters(
            name=args.identifier,
            mem_gb=args.slurm_mem,
            timeout_min=args.slurm_timeout * 60,
            slurm_partition=args.slurm_partition,
            gpus_per_node=args.num_gpus,
            cpus_per_task=(config["optim"]["num_workers"] + 1),
            tasks_per_node=(args.num_gpus if args.distributed else 1),
            nodes=args.num_nodes,
            slurm_additional_parameters=slurm_add_params,
        )
        # Record the submission parameters in each config so the remote
        # Runner (and checkpoint/requeue) can see them.
        for config in configs:
            config["slurm"] = copy.deepcopy(executor.parameters)
            config["slurm"]["folder"] = str(executor.folder)
        jobs = executor.map_array(Runner(), configs)
        logging.info(
            f"Submitted jobs: {', '.join([job.job_id for job in jobs])}"
        )
        log_file = save_experiment_log(args, jobs, configs)
        logging.info(f"Experiment log saved to: {log_file}")

    else:  # Run locally
        Runner()(config)
| 33.761905 | 79 | 0.600376 |
ec7d69fe1824fb800fa8b1dfb91c7b84856e2a76 | 2,403 | py | Python | example/wide_n_deep/make_data.py | eddyJ/fedlearner | d31ef4b3ffe2f166dc738131359a98390c354a7c | [
"Apache-2.0"
] | 1 | 2020-03-10T07:26:07.000Z | 2020-03-10T07:26:07.000Z | example/wide_n_deep/make_data.py | eddyJ/fedlearner | d31ef4b3ffe2f166dc738131359a98390c354a7c | [
"Apache-2.0"
] | null | null | null | example/wide_n_deep/make_data.py | eddyJ/fedlearner | d31ef4b3ffe2f166dc738131359a98390c354a7c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import shutil
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.core.example.example_pb2 import Example
from tensorflow.core.example.feature_pb2 import FloatList, Features, Feature, \
Int64List, BytesList
# Where this script lives; generated data is written next to it.
current_dir = os.path.dirname(__file__)

# Start from a clean slate, then create one output directory per party.
shutil.rmtree(os.path.join(current_dir, 'data'), ignore_errors=True)
os.makedirs(os.path.join(current_dir, 'data/leader'))
os.makedirs(os.path.join(current_dir, 'data/follower'))

(x, y), _ = tf.keras.datasets.mnist.load_data()
x = x.reshape(x.shape[0], -1).astype(np.float32) / 255.0
y = y.astype(np.int64)

# Vertically split each flattened image: the leader gets the left half of
# the pixels (plus the label), the follower gets the right half.
# Fixed: use integer floor division -- a float slice index such as
# x.shape[1]/2 raises TypeError on Python 3.
half = x.shape[1] // 2
xl = x[:, :half]
xf = x[:, half:]

# Shard the examples into N tfrecord files per party.
N = 10
chunk_size = x.shape[0] // N
for i in range(N):
    filename_l = os.path.join(current_dir, 'data/leader/%02d.tfrecords'%i)
    filename_f = os.path.join(current_dir, 'data/follower/%02d.tfrecords'%i)
    fl = tf.io.TFRecordWriter(filename_l)
    ff = tf.io.TFRecordWriter(filename_f)

    for j in range(chunk_size):
        idx = i*chunk_size + j
        features_l = {}
        # Fixed: BytesList requires bytes on Python 3, so encode the id.
        # example_id is the join key shared by both parties' records.
        features_l['example_id'] = \
            Feature(bytes_list=BytesList(value=[str(idx).encode()]))
        features_l['y'] = \
            Feature(int64_list=Int64List(value=[y[idx]]))
        features_l['x'] = \
            Feature(float_list=FloatList(value=list(xl[idx])))
        fl.write(
            Example(features=Features(feature=features_l)).SerializeToString())

        features_f = {}
        features_f['example_id'] = \
            Feature(bytes_list=BytesList(value=[str(idx).encode()]))
        features_f['x'] = \
            Feature(float_list=FloatList(value=list(xf[idx])))
        ff.write(
            Example(features=Features(feature=features_f)).SerializeToString())

    fl.close()
    ff.close()
| 34.328571 | 79 | 0.669996 |
ee84c84b67ca493865c6de9ba918bed30e6ef892 | 20,730 | py | Python | r2l.py | Zeda/sqr2plsql | ba94943a49b834c7695d2949f26f62c27a28db0f | [
"RSA-MD"
] | 1 | 2020-07-23T11:37:31.000Z | 2020-07-23T11:37:31.000Z | r2l.py | Zeda/sqr2plsql | ba94943a49b834c7695d2949f26f62c27a28db0f | [
"RSA-MD"
] | 1 | 2020-07-21T18:50:01.000Z | 2020-07-21T18:50:01.000Z | r2l.py | Zeda/sqr2plsql | ba94943a49b834c7695d2949f26f62c27a28db0f | [
"RSA-MD"
] | null | null | null | #!/usr/bin/python3
import sys
import re
from os import listdir
from os.path import isfile, join
# To Do:
#   Currently am auto-generating `col_<<column name>>` as a var.
#   Should instead: register column names that don't have an alias used in
#   SELECT queries, and any occurrence of that column name used outside of a
#   query is replaced with col_<<column name>> :)
#
#   Convert the print statements to utl_file.put_line() statements.

# Mutable module state shared between r2l(), r2lline() and helpers:
# `stack` holds the names of generated PL/SQL procedures; `selectvars_type`
# holds the column names of the SELECT currently being translated.
stack = []
selectvars_type = []

# Section separator emitted between generated PL/SQL procedures.
sep = '''
--------------------------------------------------------------------------------
'''

# Package spec template; `pkz_name` is later replaced with the output
# file's base name, and {} receives the PROCEDURE declarations.
head = """set scan off
CREATE OR REPLACE PACKAGE pkz_name IS
{}END pkz_name;
/
SHOW ERRORS;
SET SCAN ON;
WHENEVER SQLERROR CONTINUE;
DROP PUBLIC SYNONYM pkz_name;
WHENEVER SQLERROR EXIT ROLLBACK;
CREATE PUBLIC SYNONYM pkz_name FOR pkz_name;
WHENEVER SQLERROR CONTINUE;
START gurgrtb pkz_name;
WHENEVER SQLERROR EXIT ROLLBACK;
"""

# Package body trailer.
foot = sep + """END pkz_name;
/
show errors
grant execute on pkz_name to public;
"""

# SQR statement keywords; a SELECT column list ends when a line starts with
# one of these (see col_dealias).  Note the trailing spaces are significant.
sqrkeywords = [
    'print ',
    'display ',
    'add ',
    'subtract ',
    'let ',
    'move ',
    'do ',
    'if ',
    'write ',
    'evaluate ']

# We'll save this regex for later :)  Matches a trailing ":<width>" pad
# specifier on a WRITE operand (e.g. "$name:20").
writepad = re.compile(':\\d+$')

# prntloc = re.compile('\([\+\-]*\d*\,*\d+\,\d+\)')
#
# def r2lprint(s):
#     for match in prntloc.finditer(s):
#         pass
#
#     # match has the last value
#     fmt = match.group().strip().split('--')[0].split(' edit ')
#     s = r2lline(s[0:match.span()[0]])
#
#
#     return "{}rpad(to_char({}), {})".format(indent,s[0:t.span()[0]])
def removeproc(s, proc):
    """Delete the 'PROCEDURE <proc> ... END <proc>;' span from PL/SQL text *s*."""
    print("removing " + proc)
    begin = s.find('\nPROCEDURE ' + proc)
    # '\nEND ' is 5 chars; +1 more swallows the trailing ';'.
    finish = s.find('\nEND ' + proc) + len(proc) + 6
    return s[:begin] + s[finish:]
def deadprocremoval(s):
    # Strip generated procedures that are never referenced from the PL/SQL
    # source.  NOTE(review): structure reconstructed from an unindented dump;
    # confirm nesting against the original before relying on it.

    # Gather all of the procedure names ("P_..."); a name ends at whichever
    # of '(' or ' IS' comes first after the PROCEDURE keyword.
    all_procedures = []
    i = 0
    while i >= 0:
        i = s.find('\nPROCEDURE P_', i)
        if i >= 0:
            i += 11  # skip past '\nPROCEDURE ' to the start of the name
            next_parens = s.find('(', i)
            next_is = s.find(' IS', i)
            if next_parens < next_is:
                all_procedures += [s[i:next_parens]]
                i = next_parens
            else:
                all_procedures += [s[i:next_is]]
                i = next_is

    # Now check for an appearance of each procedure (i.e. a call site: a line
    # starting with the name, not followed by an identifier character).
    # NOTE(review): `l` is computed once, before any removal, so later passes
    # still see lines of already-removed procedures -- verify intent.
    # NOTE(review): `j.strip()[len(i)]` raises IndexError when a line equals
    # the bare name -- TODO confirm.
    l = s.split('\n')
    all_procedures_swap = []
    do_loop = True
    while do_loop:
        do_loop = False
        for i in all_procedures:
            if i != 'P_Main':  # the entry point is always kept
                found = False
                for j in l:
                    if j.strip().startswith(i) and j.strip()[len(i)].lower() not in 'abcdefghijklmnopqrstuvwxyz_':
                        found = True
                        break
                if found:
                    all_procedures_swap += [i]
                else:
                    s = removeproc(s, i)
                    do_loop = True
        if do_loop:
            # Re-check survivors: removing one procedure can orphan another.
            do_loop = False
            for i in all_procedures_swap:
                found = False
                for j in l:
                    if j.strip().startswith(i) and j.strip()[len(i)].lower() not in 'abcdefghijklmnopqrstuvwxyz_':
                        found = True
                        break
                if found:
                    all_procedures += [i]
                else:
                    # NOTE(review): i[1:] drops the leading 'P' here, unlike
                    # the first pass which passes the full name -- looks
                    # inconsistent; confirm which is intended.
                    s = removeproc(s, i[1:])
                    do_loop = True
    return s
def decomment(s):
    """Split *s* into (code, comment) at the first '--' marker.

    Both parts are stripped; a non-empty comment keeps one leading space so
    it can be appended directly after generated code.
    NOTE(review): a '--' inside a quoted literal is also treated as a
    comment marker, same as the original.
    """
    cut = s.find('--')
    if cut < 0:
        return (s.strip(), '')
    return (s[:cut].strip(), ' ' + s[cut:].strip())
def snake_to_camel(s):
    """Convert snake/kebab case to CamelCase ('print_heading' -> 'PrintHeading').

    Quirk preserved from the original: a separator that is the very last
    character is emitted literally rather than dropped.
    """
    if not s:
        return s
    out = [s[0].upper()]
    i = 1
    n = len(s)
    while i < n:
        bump = False
        # Skip runs of separators, but never past the final character.
        while s[i] in '-_' and i < n - 1:
            i += 1
            bump = True
        out.append(s[i].upper() if bump else s[i].lower())
        i += 1
    return ''.join(out)
def r2lwrite(s):
    # Translate one WRITE operand: convert it via r2lline, wrap numeric
    # (#-prefixed, here '__num_') values in to_char(), and honour a trailing
    # ':<width>' pad specifier by wrapping in rpad().
    if s.strip() == '':
        return s
    # Split off the leading indentation so it can be re-attached.
    k = 0
    while s[k] in ' \t':
        k += 1
    indent = s[0:k]
    s = r2lline(s[k:])
    t = writepad.search(s)  # trailing ':<width>' pad, if any
    if not t:
        if s.startswith('__num_'):
            # Numeric value, no pad: just convert (comment dropped).
            return "{}to_char({})".format(indent,s.split('--')[0].strip())
        else:
            return indent + s
    if s.startswith('__num_'):
        return "{}rpad(to_char({}), {})".format(indent,s[0:t.span()[0]],t.group()[1:])
    else:
        return "{}rpad({}, {})".format(indent,s[0:t.span()[0]],t.group()[1:])
def col_dealias(s):
    # Parse one line of a SELECT column list into
    # (indent, column, alias, still_in_list).  A 'from' line or any SQR
    # statement keyword terminates the list (returns cont_loop=False).
    k = 0
    while s[k] in ' \t':
        k += 1
    indent = s[0:k]
    s = s.strip().rstrip(',').rstrip()
    if s.startswith('from'):
        return ('', '', '', False)
    for i in sqrkeywords:
        if s.startswith(i):
            return ('', '', '', False)
    # low() rewrote '&' as '__col_'; prefix the line so a plain column name
    # and an '&alias' both carry the marker.
    s = '__col_' + s.strip()
    # Now work backwards in s until a marker ('&' in the original SQR)
    # is encountered -- that marks the alias, if any.
    k = len(s) - 6
    while not s.startswith('__col_', k):
        k -= 1
    if k == 0:
        # Then we don't have an alias!  Auto-generate 'col_<column>'.
        col = s[6:]
        alias = 'col_' + col
    else:
        col = s[6:k]
        # s[k+2:] drops the two leading underscores, leaving 'col_<name>'.
        alias = s[k+2:]
    return (indent, col.strip(), alias.strip(), True)
def r2lvar_rename(s):
    # Rewrite every occurrence of a registered SELECT column name in *s*
    # to 'col_<name>' (used for code outside queries that references
    # unaliased columns).
    t = ''
    k = 0
    while k < len(s):
        m = True
        # NOTE(review): no break after a match -- if one registered name is a
        # prefix of another, several can fire at the same position; confirm.
        for i in selectvars_type:
            if s.startswith(i, k):
                t += 'col_' + i
                k += len(i)
                m = False
        if m:
            t += s[k]
            k += 1
    return t
return t
def r2lline(s):
    # Translate a single (already low()-normalised) SQR line into PL/SQL.
    # Mutates the module-level `stack` (procedure names) and `constants`
    # (page size / #define values).  Unrecognised lines pass through as-is.
    global stack, constants
    if s.strip() == '':
        return s
    # calculate the indent
    k = 0
    while s[k] in [' ','\t']:
        k += 1
    indent = s[0:k]
    s = s.strip()
    if s.startswith('let '):
        # let x = expr  ==>  x := expr;
        s = s[4:].split('=')
        t = ''
        for i in s[1:]:
            t += i + '='
        (t, comment) = decomment(t[0:-1])
        return "{}{}:={};{}".format(indent,s[0],t,comment)
    elif s.startswith('input '):
        # input var  ==>  SQL*Plus substitution variable &var
        return indent+'&'+s[6:]
    elif s.startswith('do '):
        # do my_proc  ==>  P_MyProc;
        return indent+'P_'+snake_to_camel(s[3:])+';'
    elif s.startswith('add '):
        # add x to y  ==>  y := y + x;
        s = s[4:].split(' to ')
        t = s[1]
        for i in s[2:]:
            t += ' to ' + i
        (t, comment) = decomment(t)
        return "{}{} := {} + {};{}".format(indent,t,t,s[0].strip(), comment)
    elif s.startswith('subtract '):
        # subtract x from y  ==>  y := y - x;
        (s, comment) = decomment(s)
        s = s[9:].split(' from ')
        t = s[1]
        for i in s[2:]:
            t += ' from ' + i
        return "{}{} := {} - {};{}".format(indent,t,t,s[0].strip(), comment)
    elif s == 'begin-report':
        stack += ['P_Main']
        return sep+"PROCEDURE P_Main IS\nBEGIN"
    elif s == 'end-report':
        return "END P_Main;"
    elif s.startswith('begin-heading'):
        stack += ['P_PrintHeading']
        return sep+"PROCEDURE P_PrintHeading IS\nBEGIN"
    elif s == 'end-heading':
        return "END P_PrintHeading;"
    elif s.startswith('begin-procedure'):
        s = 'P_'+snake_to_camel(s[16:])
        stack += [s]
        return "{}PROCEDURE {} IS\nBEGIN".format(sep,s)
    elif s.startswith('end-procedure'):
        # Close the procedure most recently opened.
        return "END {};".format(stack[-1])
    elif s.startswith('while '):
        (s, comment) = decomment(s[6:])
        return "{}WHILE {} LOOP{}".format(indent, s, comment)
    elif s.startswith('if '):
        (s, comment) = decomment(s[3:])
        return "{}IF {} THEN{}".format(indent, s, comment)
    elif s == 'end-while':
        return indent+'END LOOP;'
    elif s.startswith('end-if'):
        return indent+'END IF;'+s[6:]
    elif s.startswith('begin-select'):
        # Push the SELECT header plus a True marker; r2l() sees the marker
        # and switches into column-list parsing.
        stack += [s[6:].upper(),True]
        return ''
    elif s == 'new-page':
        # Falls through to the final `return indent+s` with the new text.
        s = "P_PrintHeading;"
    elif s.startswith('move '):
        # move x to y [fmt]  ==>  y := to_number(x) / to_char(x[, 'fmt'])
        s = s[5:].split(' to ')
        t = s[1].strip().split(' ')
        if t[0].startswith('__num_'):
            return "{}{} := to_number({});".format(indent,t[0].strip(),s[0].strip())
        else:
            t += ['']
            t[1] = t[1].strip()
            if t[1] != '':
                return "{}{} := to_char({}, '{}');".format(indent,t[0].strip(),s[0].strip(),t[1].strip())
            else:
                return "{}{} := to_char({});".format(indent,t[0].strip(),s[0].strip())
    elif s.startswith('display '):
        (s, comment) = decomment(s[8:])
        if s.endswith('noline'):
            return "{}DBMS_OUTPUT.PUT({});{}".format(indent, s[0:-6].strip(), comment)
        else:
            return "{}DBMS_OUTPUT.PUT_LINE({});{}".format(indent, s, comment)
    elif s.startswith('open '):
        # open 'name' as N  ==>  file_N := UTL_FILE.FOPEN(...)
        if ' as ' not in s:
            return s
        else:
            s = s[5:].split(' as ')
            return "{}file_{} := UTL_FILE.FOPEN('{}', {}, 'w');".format(indent,s[1].split(' ')[0],'FILE_DIR',s[0])
    elif s.startswith('close '):
        return "{}UTL_FILE.FCLOSE(file_{});".format(indent, s[6:])
    elif s in 'end-sql':
        # NOTE(review): `in` on a string is substring membership, so any
        # substring of 'end-sql' (e.g. 'end') matches too -- confirm intent;
        # `s == 'end-sql'` was probably meant.
        return ';'
    elif s in ['commit']:
        # NOTE(review): the third format argument ';' is unused (harmless).
        return "{}{};".format(indent, s, ';')
    elif s in ['begin-setup', 'end-setup', 'begin-sql']:
        return ''
    elif s.startswith('page-size'):
        # page-size H W: recorded and substituted into the output later.
        s = s[9:].strip().split(' ')
        constants += [["page height", int(s[0])], ["page width", int(s[1])]]
        return ''
    elif s.startswith('__num_define'):
        # #define NAME VALUE (after low() rewrote '#').
        s = s[12:].strip().replace('\t', ' ').split(' ')
        constants += [[s[0], s[-1]]]
        return ''
    return indent+s
def low(s):
    """Normalise raw SQR text for translation.

    Outside single-quoted literals: lowercase everything, turn '!' comments
    into '--', '!=' into '<>', and rewrite the sigils '&', '#', '$' as the
    markers '__col_', '__num_', '__var_'.  Inside literals the text is kept
    verbatim except that '&' becomes the PL/SQL-safe "' || chr(38) || '".
    """
    out = []
    quotes = ['']        # sentinel bottom; top "'" means we're in a literal
    in_code = True
    k = 0
    while k < len(s):
        c = s[k]
        if c == "'":
            if quotes[-1] == c:
                quotes.pop()
                if len(quotes) == 1:
                    in_code = True
            else:
                quotes.append(c)
                in_code = False
        if in_code:
            if c == '!':
                if s[k + 1] == '=':
                    out.append('<>')
                    k += 1
                else:
                    # '!' starts a comment that runs to end of line.
                    out.append('--')
                    while s[k + 1] != '\n':
                        k += 1
                        out.append(s[k])
            elif c == '&':
                out.append('__col_')
            elif c == '#':
                out.append('__num_')
            elif c == '$':
                out.append('__var_')
            else:
                out.append(c.lower())
        elif c == '&':
            out.append("' || chr(38) || '")
        else:
            out.append(c)
        k += 1
    return ''.join(out)
def r2l(s):
    # Translate a whole SQR source string into a PL/SQL package body.
    # NOTE(review): nesting reconstructed from an unindented dump -- confirm
    # the control structure (especially the SELECT parsing section) against
    # the original before relying on it.

    def r2l_write():
        # Translate one multi-line WRITE statement into utl_file.put_line().
        # Advances the shared cursor (k, i) past continuation lines.
        nonlocal i, s, k
        i = i.strip()
        idx = i.lower().index('from')
        out = '\tutl_file.put_line(file_{},'.format(i[6:idx].strip())
        out += r2lwrite(i[idx + 4:])
        while s[k+1].strip() == '':
            k += 1
        k += 1
        i = s[k].strip()
        # Operand continuation lines start with a literal or a sigil marker.
        while i.startswith("'") or i.startswith('__num_') or i.startswith('__col_') or i.startswith('__var_'):
            out += ' ||\n\t\t\t'
            out += r2lwrite(s[k])
            k += 1
            i = s[k].strip()
        return out + ');\n'

    def r2l_evaluate():
        # Translate EVALUATE/WHEN/WHEN-OTHER into an IF/ELSIF/ELSE ladder.
        nonlocal i, s, k
        (v, out) = decomment(i.strip()[9:])
        k += 1
        cond = []
        first = True
        while not s[k].strip().lower().startswith('end-evaluate'):
            i = s[k]
            if i.strip().lower().startswith('when '):
                (i, comment) = decomment(i)
                # indent of the WHEN line is reused for the IF/ELSIF.
                indent = i.lower().split('when')[0]
                cond += ["{} = {}".format(v, i[i.index('=') + 1:].strip())]
            elif i.strip().lower().startswith('when-other'):
                # treat this like an ELSE; any queued conditions are discarded
                out += "{}ELSE\n".format(indent)
                cond = []
            else:
                # First body line after one or more WHENs: flush them as a
                # single IF/ELSIF with OR-joined conditions.
                if len(cond) > 0:
                    if first:
                        out += "{}IF {}".format(indent, cond[0])
                        first = False
                    else:
                        out += "{}ELSIF {}".format(indent, cond[0])
                    for j in cond[1:]:
                        out += ' OR {}'.format(j)
                    out += ' THEN\n'
                    cond = []
                out += r2lline(i) + '\n'
            k += 1
        k += 1
        i = s[k]
        return out + '{}END IF;\n'.format(indent)

    global stack, selectvars_type, constants
    # Reset all module-level translation state for this file.
    constants = []
    stack = []
    selectvars = []       # alias of each registered SELECT column
    selectvars_i = []     # cursor index the column belongs to
    selectvars_type = []  # raw column name (used for %TYPE declarations)
    cursors = ''
    curse_cnt = 0
    index = 0
    s = low(s)
    out = ''
    s = s.split('\n')
    k = 0
    # We'll read any comments at the top of the file and keep them there
    comment = ''
    while s[k].startswith('--') or s[k].strip() == '':
        comment += s[k]+'\n'
        k += 1

    # Main line-by-line translation loop.
    while k < len(s):
        i = s[k]
        while i.strip().startswith('write'):
            out += r2l_write()
        if i.strip().startswith('evaluate '):
            out += r2l_evaluate()
        if len(stack) == 0:
            out += r2lline(r2lvar_rename(i)) + '\n'
        elif stack[-1] != True:
            out += r2lline(i) + '\n'
        else:
            # Here we are in a SELECT statement that we have to parse.
            # Note! This doesn't handle comments or trailing whitespace!
            curse = ''
            while i == '':
                k += 1
                i = s[k]
            cont_loop = True
            prev_col = ''
            prev_alias = ''
            prev_indent = ''
            # Column-list loop: each column is flushed one line late so a
            # following alias-only line can rename it.
            while cont_loop:
                # read backwards to determine the output name, if any
                i = i.split('!')[0]
                if i.strip() != '':
                    (indent, col, alias, cont_loop) = col_dealias(i)
                    if cont_loop:
                        if col == '':
                            prev_alias = alias
                            if prev_col != '':
                                if prev_alias == 'col_' + prev_col:
                                    curse += "\t\t{}{},\n".format(prev_indent, prev_col)
                                else:
                                    curse += "\t\t{}{}\t\t{},\n".format(prev_indent, prev_col, prev_alias[4:])
                                selectvars += [prev_alias]
                                selectvars_i += [index]
                                selectvars_type += [prev_col]
                            k += 1
                            i = s[k]
                            while i == '':
                                k += 1
                                i = s[k]
                        else:
                            k += 1
                            i = s[k]
                        prev_col = col
                        prev_alias = alias
                        prev_indent = indent
                else:
                    k += 1
                    i = s[k]
            # Flush the last pending column.
            if prev_col != '':
                if prev_alias == 'col_' + prev_col:
                    curse += "\t\t{}{},\n".format(prev_indent, prev_col)
                else:
                    curse += "\t\t{}{}\t\t{},\n".format(prev_indent, prev_col, prev_alias[4:])
                selectvars += [prev_alias]
                selectvars_i += [index]
                selectvars_type += [prev_col]
            curse = curse.rstrip(',\n \t') + '\n'
            # Per-row body of the SELECT (code between columns and FROM).
            work = ''
            while not i.strip().startswith("from"):
                if i.strip().startswith('write'):
                    work += r2l_write()
                elif i.strip().startswith('evaluate '):
                    work += r2l_evaluate()
                else:
                    work += r2lline(i) + '\n'
                    k += 1
                    i = s[k]
            # FROM/WHERE clause, up to end-select.
            logic = ''
            while "end-select" not in i:
                logic += '\n\t' + r2lline(i)
                k += 1
                i = s[k]
            logic = logic.replace('and ', 'AND ')
            logic = logic.replace('or ', 'OR ')
            logic = logic.replace('from ', 'FROM ')
            logic = logic.replace('where ', 'WHERE ')
            stack.pop()  # pop the True marker pushed by begin-select
            if work != '':
                # Per-row code present: emit a CURSOR + FOR loop.
                cname = "Cursor{}".format(curse_cnt)
                cursors += "{}CURSOR {} IS\n\t{}\n{}{};\n".format(sep, cname, stack.pop(), curse, logic)
                curse_cnt += 1
                out += 'FOR i{} IN {} LOOP\n\t{}\nEND LOOP;\n'.format(index,cname,work.strip())
                index += 1
            else:
                # Otherwise, we have a SELECT ... INTO.
                out += stack.pop()
                for i in curse.split('\n'):
                    out += '\n\t\t'+i.strip().split('\t')[0] + ','
                while out != out.rstrip().rstrip(','):
                    out = out.rstrip().rstrip(',')
                out += "\nINTO"
                # Emit the variables registered for this statement, then
                # unregister them (they are locals of the INTO, not a cursor).
                j = 0
                while selectvars_i[j] != index:
                    j += 1
                i = j
                while i < len(selectvars_i)-1:
                    out += '\n\t\t'+selectvars[i]+','
                    i += 1
                out += '\n\t\t'+selectvars[i]
                selectvars = selectvars[0:j]
                selectvars_i = selectvars_i[0:j]
                selectvars_type = selectvars_type[0:j]
                out += logic+';\n'
        k += 1

    # out = out.replace('select','SELECT')
    # out = out.replace('where','WHERE')
    # out = out.replace('and','AND')
    # out = out.replace('or','OR')

    # Combine the selectvar variables so that we can sort; build the package
    # body header with its standard variable declarations.
    vars = sep + 'CREATE OR REPLACE PACKAGE BODY pkz_name IS\n'
    h = -1
    w = -1
    for i in constants:
        if i[0] == 'page height':
            h = i[1]
        elif i[0] == 'page width':
            w = i[1]
            break
    vars += "\t{}\t\t\tNUMBER:={};\n".format("max_lines", h)
    vars += "\t{}\t\t\tNUMBER:={};\n".format("report_width", w)
    vars += "\t{}\t\t\tNUMBER:={};\n".format("page_num", 1)
    vars += "\t{}\t\t\tNUMBER;\n".format("line_num")
    vars += """ cr_file UTL_FILE.FILE_TYPE;
 current_date_dmy VARCHAR2(11);
 current_time VARCHAR2(8);
 inst_name gubinst.gubinst_name%TYPE;
 program_name VARCHAR2(15);
 run_msg VARCHAR2(60);
 rpt_msg VARCHAR2(50);
 file_name VARCHAR2(50);
 report_title VARCHAR2(100);\n--\n"""
    # Rewrite each registered cursor column reference to 'i<n>.<name>' and
    # declare, with %TYPE, every local variable assigned from one.
    k = 0
    while k < len(selectvars):
        var = selectvars[k]
        typ = selectvars_type[k]
        if '__' + var in out:
            out = out.replace('__' + var, "i{}.{}".format(selectvars_i[k],var[4:]))
            substr = " := i{}.{};".format(selectvars_i[k],var[4:])
            i = out.find(substr)
            while i >= 0:
                # Scan backwards over the identifier being assigned to.
                n = i
                while out[n] in ' \t':
                    n -= 1
                while out[n] in '_.' or '0'<=out[n]<='9' or 'A'<=out[n]<='Z' or 'a'<=out[n]<='z':
                    n -= 1
                if " {} ".format(out[n:i].strip()) not in vars:
                    # NOTE(review): typ.split('_')[0] assumes the table name
                    # is the column-name prefix -- confirm.
                    vars += "\t{}\t\t\t{}.{}%TYPE;\n".format(out[n:i].strip(), typ.split('_')[0], typ)
                i = out.find(substr, i + 1)
        k += 1
    # Forward declarations for the package spec.
    defs = ''
    for i in stack:
        defs += "\tPROCEDURE {};\n".format(i)
    out += """-------------------------------------------------------------------------------
PROCEDURE P_PrintLine(s VARCHAR2) IS
BEGIN
 utl_file.put_line (cr_file, rtrim(substr(s, 1, report_width)));
 line_num := line_num + 1;
 IF line_num >= max_lines THEN
  P_PrintHeading;
 END IF;
END P_PrintLine;
"""
    # Assemble the final text and apply the global cleanup substitutions.
    s = comment+head.format(defs)+vars+cursors+out+foot
    s = s.replace('__col_', 'col_')
    # s = s.replace('__num_', '')
    # s = s.replace('__var_', '')
    s = s.replace('__num_', 'num_')
    s = s.replace('__var_', 'var_')
    s = s.replace('current-line', '__line_num')
    s = s.replace('\n;', ';')
    s = s.replace(" = ''", ' IS NULL')
    s = s.replace("<> ''", 'IS NOT NULL')
    while '\n\n\n--' in s:
        s = s.replace('\n\n\n--', '\n\n--')
    while 'BEGIN\n\n' in s:
        s = s.replace('BEGIN\n\n', 'BEGIN\n')
    while '\n\nEND' in s:
        s = s.replace('\n\nEND', '\nEND')
    # Substitute recorded constants ({page height}, {page width}, #defines).
    for i in constants:
        s = s.replace('{'+i[0]+'}', str(i[1]))
    return deadprocremoval(s)
# Command-line driver: argv is consumed in (input, output) pairs.  When a
# pair has no output name, the translation is printed to stdout instead.
k = 1
while k < len(sys.argv):
    fin = sys.argv[k]
    # Fixed: context managers close the files even if r2l()/write() raises
    # (the originals leaked handles on error).
    with open(fin, 'r') as f:
        s = f.read()
    s = r2l(s)
    if k + 1 < len(sys.argv):
        fout = sys.argv[k + 1]
        # Replace the package-name placeholder with the output file's base
        # name (path separators normalised to backslash first).
        s = s.replace('pkz_name', fout.replace('/','\\').split('\\')[-1].split('.')[0])
        print("{} ==> {}".format(fin,fout))
        with open(fout, 'w') as f:
            f.write(s)
    else:
        print(s)
    k += 2
| 30.087083 | 114 | 0.429571 |
a387787f7a105390526b501b3c3003fb72a66390 | 8,982 | py | Python | tfx/orchestration/launcher/kubernetes_component_launcher.py | suryaavala/tfx | c315e7cf75822088e974e15b43c96fab86746733 | [
"Apache-2.0"
] | 1 | 2021-05-10T10:41:06.000Z | 2021-05-10T10:41:06.000Z | tfx/orchestration/launcher/kubernetes_component_launcher.py | suryaavala/tfx | c315e7cf75822088e974e15b43c96fab86746733 | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/launcher/kubernetes_component_launcher.py | suryaavala/tfx | c315e7cf75822088e974e15b43c96fab86746733 | [
"Apache-2.0"
] | null | null | null | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Docker component launcher which launches a container in docker environment ."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Optional, Text, cast
from absl import logging
from kubernetes import client
from tfx import types
from tfx.dsl.component.experimental import executor_specs
from tfx.dsl.components.base import executor_spec
from tfx.orchestration.config import base_component_config
from tfx.orchestration.config import kubernetes_component_config
from tfx.orchestration.launcher import base_component_launcher
from tfx.orchestration.launcher import container_common
from tfx.utils import kube_utils
class KubernetesComponentLauncher(base_component_launcher.BaseComponentLauncher
):
"""Responsible for launching a container executor on Kubernetes."""
  # TODO(hongyes): add container spec into exec_properties for driver to check.
  @classmethod
  def can_launch(
      cls,
      component_executor_spec: executor_spec.ExecutorSpec,
      component_config: base_component_config.BaseComponentConfig = None
  ) -> bool:
    """Checks if the launcher can launch the executor spec.

    Args:
      component_executor_spec: The executor spec of the component to launch.
      component_config: Optional per-component launch configuration; when
        given, it must be a KubernetesComponentConfig for this launcher.

    Returns:
      True iff the spec is a container-based executor spec (plain or
      templated) and any supplied config is Kubernetes-specific.
    """
    # A config of a different type means another launcher owns this component.
    if component_config and not isinstance(
        component_config,
        kubernetes_component_config.KubernetesComponentConfig):
      return False
    return isinstance(component_executor_spec,
                      (executor_spec.ExecutorContainerSpec,
                       executor_specs.TemplatedExecutorContainerSpec))
def _run_executor(self, execution_id: int,
input_dict: Dict[Text, List[types.Artifact]],
output_dict: Dict[Text, List[types.Artifact]],
exec_properties: Dict[Text, Any]) -> None:
"""Execute underlying component implementation.
Runs executor container in a Kubernetes Pod and wait until it goes into
`Succeeded` or `Failed` state.
Args:
execution_id: The ID of the execution.
input_dict: Input dict from input key to a list of Artifacts. These are
often outputs of another component in the pipeline and passed to the
component by the orchestration system.
output_dict: Output dict from output key to a list of Artifacts. These are
often consumed by a dependent component.
exec_properties: A dict of execution properties. These are inputs to
pipeline with primitive types (int, string, float) and fully
materialized when a pipeline is constructed. No dependency to other
component or later injection from orchestration systems is necessary or
possible on these values.
Raises:
RuntimeError: when the pod is in `Failed` state or unexpected failure from
Kubernetes API.
"""
container_spec = cast(executor_spec.ExecutorContainerSpec,
self._component_executor_spec)
# Replace container spec with jinja2 template.
container_spec = container_common.resolve_container_template(
container_spec, input_dict, output_dict, exec_properties)
pod_name = self._build_pod_name(execution_id)
# TODO(hongyes): replace the default value from component config.
try:
namespace = kube_utils.get_kfp_namespace()
except RuntimeError:
namespace = 'kubeflow'
pod_manifest = self._build_pod_manifest(pod_name, container_spec)
core_api = kube_utils.make_core_v1_api()
if kube_utils.is_inside_kfp():
launcher_pod = kube_utils.get_current_kfp_pod(core_api)
pod_manifest['spec']['serviceAccount'] = launcher_pod.spec.service_account
pod_manifest['spec'][
'serviceAccountName'] = launcher_pod.spec.service_account_name
pod_manifest['metadata'][
'ownerReferences'] = container_common.to_swagger_dict(
launcher_pod.metadata.owner_references)
else:
pod_manifest['spec']['serviceAccount'] = kube_utils.TFX_SERVICE_ACCOUNT
pod_manifest['spec'][
'serviceAccountName'] = kube_utils.TFX_SERVICE_ACCOUNT
logging.info('Looking for pod "%s:%s".', namespace, pod_name)
resp = kube_utils.get_pod(core_api, pod_name, namespace)
if not resp:
logging.info('Pod "%s:%s" does not exist. Creating it...',
namespace, pod_name)
logging.info('Pod manifest: %s', pod_manifest)
try:
resp = core_api.create_namespaced_pod(
namespace=namespace, body=pod_manifest)
except client.rest.ApiException as e:
raise RuntimeError(
'Failed to created container executor pod!\nReason: %s\nBody: %s' %
(e.reason, e.body))
# Wait up to 300 seconds for the pod to move from pending to another status.
logging.info('Waiting for pod "%s:%s" to start.', namespace, pod_name)
kube_utils.wait_pod(
core_api,
pod_name,
namespace,
exit_condition_lambda=kube_utils.pod_is_not_pending,
condition_description='non-pending status',
timeout_sec=300)
logging.info('Start log streaming for pod "%s:%s".', namespace, pod_name)
try:
logs = core_api.read_namespaced_pod_log(
name=pod_name,
namespace=namespace,
container=kube_utils.ARGO_MAIN_CONTAINER_NAME,
follow=True,
_preload_content=False).stream()
except client.rest.ApiException as e:
raise RuntimeError(
'Failed to stream the logs from the pod!\nReason: %s\nBody: %s' %
(e.reason, e.body))
for log in logs:
logging.info(log.decode().rstrip('\n'))
# Wait indefinitely for the pod to complete.
resp = kube_utils.wait_pod(
core_api,
pod_name,
namespace,
exit_condition_lambda=kube_utils.pod_is_done,
condition_description='done state')
if resp.status.phase == kube_utils.PodPhase.FAILED.value:
raise RuntimeError('Pod "%s:%s" failed with status "%s".' %
(namespace, pod_name, resp.status))
logging.info('Pod "%s:%s" is done.', namespace, pod_name)
def _build_pod_manifest(
self, pod_name: Text,
container_spec: executor_spec.ExecutorContainerSpec) -> Dict[Text, Any]:
"""Build a pod spec.
The function builds a pod spec by patching executor container spec into
the pod spec from component config.
Args:
pod_name: The name of the pod.
container_spec: The resolved executor container spec.
Returns:
The pod manifest in dictionary format.
"""
if self._component_config:
kubernetes_config = cast(
kubernetes_component_config.KubernetesComponentConfig,
self._component_config)
pod_manifest = container_common.to_swagger_dict(kubernetes_config.pod)
else:
pod_manifest = {}
pod_manifest.update({
'apiVersion': 'v1',
'kind': 'Pod',
})
# TODO(hongyes): figure out a better way to figure out type hints for nested
# dict.
metadata = pod_manifest.setdefault('metadata', {}) # type: Dict[Text, Any] # pytype: disable=annotation-type-mismatch
metadata.update({'name': pod_name})
spec = pod_manifest.setdefault('spec', {}) # type: Dict[Text, Any] # pytype: disable=annotation-type-mismatch
spec.update({'restartPolicy': 'Never'})
containers = spec.setdefault('containers',
[]) # type: List[Dict[Text, Any]]
container = None # type: Optional[Dict[Text, Any]]
for c in containers:
if c['name'] == kube_utils.ARGO_MAIN_CONTAINER_NAME:
container = c
break
if not container:
container = {'name': kube_utils.ARGO_MAIN_CONTAINER_NAME}
containers.append(container)
container.update({
'image': container_spec.image,
'command': container_spec.command,
'args': container_spec.args,
})
return pod_manifest
def _build_pod_name(self, execution_id: int) -> Text:
if self._pipeline_info.run_id:
pipeline_name = (
self._pipeline_info.pipeline_name[:50] + '-' +
self._pipeline_info.run_id[:50])
else:
pipeline_name = self._pipeline_info.pipeline_name[:100]
pod_name = '%s-%s-%s' % (
pipeline_name, self._component_info.component_id[:50], execution_id)
return kube_utils.sanitize_pod_name(pod_name)
| 39.394737 | 123 | 0.691494 |
04e8e3089be7716286daa298c66787746a4e98c2 | 1,840 | py | Python | administrator/controller/loginController.py | augustoberwaldt/skin-system | 4ca878d3062abcf3f68c044abaaa631c88f40c5b | [
"MIT"
] | 2 | 2017-08-29T14:17:44.000Z | 2017-08-31T14:40:39.000Z | administrator/controller/loginController.py | augustoberwaldt/diseases-recognition | 4ca878d3062abcf3f68c044abaaa631c88f40c5b | [
"MIT"
] | 2 | 2020-02-11T23:27:55.000Z | 2020-06-05T17:30:29.000Z | administrator/controller/loginController.py | augustoberwaldt/diseases-recognition | 4ca878d3062abcf3f68c044abaaa631c88f40c5b | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from administrator.formsModel import FormUser
from django.contrib import messages
import json
def do_login(request):
if request.method == 'POST' :
username = request.POST['username']
password = request.POST['password']
user = authenticate(request, username=username, password=password)
if user is not None :
login(request,user)
return redirect('/app/home')
else :
reponse = {'title' : 'Dados invalidos ! ', 'message' : 'Tente novamente.', 'type' : 'error' }
messages.add_message(request, messages.INFO, json.dumps(reponse))
if request.user.is_authenticated():
return redirect('/app/home')
return render(request, 'auth/login.html')
def resetPass(request):
return render(request, 'auth/resetPassword.html')
def register(request):
form = FormUser.FormUser(request.POST or None);
context = {'form': form}
if request.method == 'POST':
if form.is_valid():
form.save()
reponse = {'title': 'Success ', 'message': 'Cadastro realizado !', 'type': 'success'}
messages.add_message(request, messages.INFO, json.dumps(reponse))
return redirect('/app/register')
else :
reponse = {'title': 'Dados invalidos ! ', 'message': 'Tente novamente.', 'type': 'error'}
messages.add_message(request, messages.INFO, json.dumps(reponse))
return render(request, 'auth/register.html', context)
def do_logout(request):
logout(request)
reponse = {'title': 'Success ', 'message': 'Saida com sucesso !', 'type': 'success'}
messages.add_message(request, messages.INFO, json.dumps(reponse))
return redirect('/app/')
| 32.857143 | 105 | 0.640761 |
1b6a13978c52501014801c6f8f69c14017ce7de3 | 25,885 | py | Python | EfficientNet/utils.py | DaikiSannoXC/SmoothAdversarialTraining | fa33249377fb181df65b055ccc04707138127346 | [
"MIT"
] | 61 | 2020-09-25T05:34:52.000Z | 2022-03-18T02:32:43.000Z | EfficientNet/utils.py | DaikiSannoXC/SmoothAdversarialTraining | fa33249377fb181df65b055ccc04707138127346 | [
"MIT"
] | 1 | 2021-03-26T13:29:07.000Z | 2021-03-26T13:29:07.000Z | EfficientNet/utils.py | DaikiSannoXC/SmoothAdversarialTraining | fa33249377fb181df65b055ccc04707138127346 | [
"MIT"
] | 2 | 2021-02-11T08:35:17.000Z | 2021-07-13T05:04:09.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.python.tpu import tpu_function # pylint:disable=g-direct-tensorflow-import
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.ops import variables as tf_variables
def build_learning_rate(initial_lr,
global_step,
steps_per_epoch=None,
lr_decay_type='exponential',
decay_factor=0.97,
decay_epochs=2.4,
total_steps=None,
warmup_epochs=5):
"""Build learning rate."""
if lr_decay_type == 'exponential':
assert steps_per_epoch is not None
decay_steps = steps_per_epoch * decay_epochs
lr = tf.train.exponential_decay(
initial_lr, global_step, decay_steps, decay_factor, staircase=True)
elif lr_decay_type == 'cosine':
assert total_steps is not None
lr = 0.5 * initial_lr * (
1 + tf.cos(np.pi * tf.cast(global_step, tf.float32) / total_steps))
elif lr_decay_type == 'constant':
lr = initial_lr
else:
assert False, 'Unknown lr_decay_type : %s' % lr_decay_type
if warmup_epochs:
logging.info('Learning rate warmup_epochs: %d', warmup_epochs)
warmup_steps = int(warmup_epochs * steps_per_epoch)
warmup_lr = (
initial_lr * tf.cast(global_step, tf.float32) / tf.cast(
warmup_steps, tf.float32))
lr = tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
return lr
def build_optimizer(learning_rate,
optimizer_name='rmsprop',
decay=0.9,
epsilon=0.001,
momentum=0.9):
"""Build optimizer."""
if optimizer_name == 'sgd':
logging.info('Using SGD optimizer')
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
elif optimizer_name == 'momentum':
logging.info('Using Momentum optimizer')
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate, momentum=momentum)
elif optimizer_name == 'rmsprop':
logging.info('Using RMSProp optimizer')
optimizer = tf.train.RMSPropOptimizer(learning_rate, decay, momentum,
epsilon)
else:
logging.fatal('Unknown optimizer: %s', optimizer_name)
return optimizer
class TpuBatchNormalization(tf.layers.BatchNormalization):
# class TpuBatchNormalization(tf.layers.BatchNormalization):
"""Cross replica batch normalization."""
def __init__(self, fused=False, **kwargs):
if fused in (True, None):
raise ValueError('TpuBatchNormalization does not support fused=True.')
super(TpuBatchNormalization, self).__init__(fused=fused, **kwargs)
def _cross_replica_average(self, t, num_shards_per_group):
"""Calculates the average value of input tensor across TPU replicas."""
num_shards = tpu_function.get_tpu_context().number_of_shards
group_assignment = None
if num_shards_per_group > 1:
if num_shards % num_shards_per_group != 0:
raise ValueError('num_shards: %d mod shards_per_group: %d, should be 0'
% (num_shards, num_shards_per_group))
num_groups = num_shards // num_shards_per_group
group_assignment = [[
x for x in range(num_shards) if x // num_shards_per_group == y
] for y in range(num_groups)]
return tf.tpu.cross_replica_sum(t, group_assignment) / tf.cast(
num_shards_per_group, t.dtype)
def _moments(self, inputs, reduction_axes, keep_dims):
"""Compute the mean and variance: it overrides the original _moments."""
shard_mean, shard_variance = super(TpuBatchNormalization, self)._moments(
inputs, reduction_axes, keep_dims=keep_dims)
num_shards = tpu_function.get_tpu_context().number_of_shards or 1
if num_shards <= 8: # Skip cross_replica for 2x2 or smaller slices.
num_shards_per_group = 1
else:
num_shards_per_group = max(8, num_shards // 8)
logging.info('TpuBatchNormalization with num_shards_per_group %s',
num_shards_per_group)
if num_shards_per_group > 1:
# Compute variance using: Var[X]= E[X^2] - E[X]^2.
shard_square_of_mean = tf.math.square(shard_mean)
shard_mean_of_square = shard_variance + shard_square_of_mean
group_mean = self._cross_replica_average(
shard_mean, num_shards_per_group)
group_mean_of_square = self._cross_replica_average(
shard_mean_of_square, num_shards_per_group)
group_variance = group_mean_of_square - tf.math.square(group_mean)
return (group_mean, group_variance)
else:
return (shard_mean, shard_variance)
class BatchNormalization(tf.layers.BatchNormalization):
"""Fixed default name of BatchNormalization to match TpuBatchNormalization."""
def __init__(self, name='tpu_batch_normalization', **kwargs):
super(BatchNormalization, self).__init__(name=name, **kwargs)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if not input_shape.ndims:
raise ValueError('Input has undefined rank:', input_shape)
ndims = len(input_shape)
# Convert axis to list and resolve negatives
if isinstance(self.axis, int):
self.axis = [self.axis]
for idx, x in enumerate(self.axis):
if x < 0:
self.axis[idx] = ndims + x
# Validate axes
for x in self.axis:
if x < 0 or x >= ndims:
raise ValueError('Invalid axis: %d' % x)
if len(self.axis) != len(set(self.axis)):
raise ValueError('Duplicate axis: %s' % self.axis)
if self.virtual_batch_size is not None:
if self.virtual_batch_size <= 0:
raise ValueError('virtual_batch_size must be a positive integer that '
'divides the true batch size of the input Tensor')
# If using virtual batches, the first dimension must be the batch
# dimension and cannot be the batch norm axis
if 0 in self.axis:
raise ValueError('When using virtual_batch_size, the batch dimension '
'must be 0 and thus axis cannot include 0')
if self.adjustment is not None:
raise ValueError('When using virtual_batch_size, adjustment cannot '
'be specified')
if self.fused in (None, True):
# TODO(yaozhang): if input is not 4D, reshape it to 4D and reshape the
# output back to its original shape accordingly.
if self._USE_V2_BEHAVIOR:
if self.fused is None:
self.fused = (ndims == 4)
elif self.fused and ndims != 4:
raise ValueError('Batch normalization layers with fused=True only '
'support 4D input tensors.')
else:
assert self.fused is not None
self.fused = (ndims == 4 and self._fused_can_be_used())
# TODO(chrisying): fused batch norm is currently not supported for
# multi-axis batch norm and by extension virtual batches. In some cases,
# it might be possible to use fused batch norm but would require reshaping
# the Tensor to 4D with the axis in 1 or 3 (preferred 1) which is
# particularly tricky. A compromise might be to just support the most
# common use case (turning 5D w/ virtual batch to NCHW)
if self.fused:
if self.axis == [1]:
self._data_format = 'NCHW'
elif self.axis == [3]:
self._data_format = 'NHWC'
else:
raise ValueError('Unsupported axis, fused batch norm only supports '
'axis == [1] or axis == [3]')
axis_to_dim = {x: input_shape.dims[x].value for x in self.axis}
for x in axis_to_dim:
if axis_to_dim[x] is None:
raise ValueError('Input has undefined `axis` dimension. Input shape: ',
input_shape)
self.input_spec = InputSpec(ndim=ndims, axes=axis_to_dim)
if len(axis_to_dim) == 1 and self.virtual_batch_size is None:
# Single axis batch norm (most common/default use-case)
# param_shape = (list(axis_to_dim.values())[0],)
param_shape = (1, 1, 1, 1, list(axis_to_dim.values())[0])
else:
# Parameter shape is the original shape but with 1 in all non-axis dims
param_shape = [
axis_to_dim[i] if i in axis_to_dim else 1 for i in range(ndims)
]
if self.virtual_batch_size is not None:
# When using virtual batches, add an extra dim at index 1
param_shape.insert(1, 1)
for idx, x in enumerate(self.axis):
self.axis[idx] = x + 1 # Account for added dimension
if self.scale:
self.gamma = self.add_weight(
name='gamma',
shape=param_shape,
dtype=self._param_dtype,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
trainable=True,
experimental_autocast=False)
else:
self.gamma = None
if self.fused:
self._gamma_const = K.constant(
1.0, dtype=self._param_dtype, shape=param_shape)
self.gamma = tf.reshape(self.gamma, [-1])
if self.center:
self.beta = self.add_weight(
name='beta',
shape=param_shape,
dtype=self._param_dtype,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
trainable=True,
experimental_autocast=False)
else:
self.beta = None
if self.fused:
self._beta_const = K.constant(
0.0, dtype=self._param_dtype, shape=param_shape)
self.beta = tf.reshape(self.beta, [-1])
try:
# Disable variable partitioning when creating the moving mean and variance
if hasattr(self, '_scope') and self._scope:
partitioner = self._scope.partitioner
self._scope.set_partitioner(None)
else:
partitioner = None
self.moving_mean = self.add_weight(
name='moving_mean',
shape=param_shape,
dtype=self._param_dtype,
initializer=self.moving_mean_initializer,
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.MEAN,
experimental_autocast=False)
self.moving_variance = self.add_weight(
name='moving_variance',
shape=param_shape,
dtype=self._param_dtype,
initializer=self.moving_variance_initializer,
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.MEAN,
experimental_autocast=False)
self.moving_mean = tf.reshape(self.moving_mean, [-1])
self.moving_variance = tf.reshape(self.moving_variance, [-1])
if self.renorm:
# In batch renormalization we track the inference moving stddev instead
# of the moving variance to more closely align with the paper.
def moving_stddev_initializer(*args, **kwargs):
return math_ops.sqrt(
self.moving_variance_initializer(*args, **kwargs))
with distribution_strategy_context.get_strategy(
).extended.colocate_vars_with(self.moving_variance):
self.moving_stddev = self.add_weight(
name='moving_stddev',
shape=param_shape,
dtype=self._param_dtype,
initializer=moving_stddev_initializer,
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.MEAN,
experimental_autocast=False)
self.moving_stddev = tf.reshape(self.moving_stddev, [-1])
# Create variables to maintain the moving mean and standard deviation.
# These are used in training and thus are different from the moving
# averages above. The renorm variables are colocated with moving_mean
# and moving_stddev.
# NOTE: below, the outer `with device` block causes the current device
# stack to be cleared. The nested ones use a `lambda` to set the desired
# device and ignore any devices that may be set by the custom getter.
def _renorm_variable(name,
shape,
initializer=init_ops.zeros_initializer()):
"""Create a renorm variable."""
var = self.add_weight(
name=name,
shape=shape,
dtype=self._param_dtype,
initializer=initializer,
synchronization=tf_variables.VariableSynchronization.ON_READ,
trainable=False,
aggregation=tf_variables.VariableAggregation.MEAN,
experimental_autocast=False)
var = tf.reshape(var, [-1])
return var
with distribution_strategy_context.get_strategy(
).extended.colocate_vars_with(self.moving_mean):
self.renorm_mean = _renorm_variable('renorm_mean', param_shape,
self.moving_mean_initializer)
with distribution_strategy_context.get_strategy(
).extended.colocate_vars_with(self.moving_stddev):
self.renorm_stddev = _renorm_variable('renorm_stddev', param_shape,
moving_stddev_initializer)
finally:
if partitioner:
self._scope.set_partitioner(partitioner)
self.built = True
def drop_connect(inputs, is_training, survival_prob):
"""Drop the entire conv with given survival probability."""
# "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf
if not is_training:
return inputs
# Compute tensor.
batch_size = tf.shape(inputs)[0]
random_tensor = survival_prob
random_tensor += tf.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
binary_tensor = tf.floor(random_tensor)
# Unlike conventional way that multiply survival_prob at test time, here we
# divide survival_prob at training time, such that no addition compute is
# needed at test time.
output = tf.div(inputs, survival_prob) * binary_tensor
return output
def archive_ckpt(ckpt_eval, ckpt_objective, ckpt_path):
"""Archive a checkpoint if the metric is better."""
ckpt_dir, ckpt_name = os.path.split(ckpt_path)
saved_objective_path = os.path.join(ckpt_dir, 'best_objective.txt')
saved_objective = float('-inf')
if tf.gfile.Exists(saved_objective_path):
with tf.gfile.GFile(saved_objective_path, 'r') as f:
saved_objective = float(f.read())
if saved_objective > ckpt_objective:
logging.info('Ckpt %s is worse than %s', ckpt_objective, saved_objective)
return False
filenames = tf.gfile.Glob(ckpt_path + '.*')
if filenames is None:
logging.info('No files to copy for checkpoint %s', ckpt_path)
return False
# Clear the old folder.
dst_dir = os.path.join(ckpt_dir, 'archive')
if tf.gfile.Exists(dst_dir):
tf.gfile.DeleteRecursively(dst_dir)
tf.gfile.MakeDirs(dst_dir)
# Write checkpoints.
for f in filenames:
dest = os.path.join(dst_dir, os.path.basename(f))
tf.gfile.Copy(f, dest, overwrite=True)
ckpt_state = tf.train.generate_checkpoint_state_proto(
dst_dir,
model_checkpoint_path=ckpt_name,
all_model_checkpoint_paths=[ckpt_name])
with tf.gfile.GFile(os.path.join(dst_dir, 'checkpoint'), 'w') as f:
f.write(str(ckpt_state))
with tf.gfile.GFile(os.path.join(dst_dir, 'best_eval.txt'), 'w') as f:
f.write('%s' % ckpt_eval)
# Update the best objective.
with tf.gfile.GFile(saved_objective_path, 'w') as f:
f.write('%f' % ckpt_objective)
logging.info('Copying checkpoint %s to %s', ckpt_path, dst_dir)
return True
def get_ema_vars():
"""Get all exponential moving average (ema) variables."""
ema_vars = tf.trainable_variables() + tf.get_collection('moving_vars')
for v in tf.global_variables():
# We maintain mva for batch norm moving mean and variance as well.
if 'moving_mean' in v.name or 'moving_variance' in v.name:
ema_vars.append(v)
return list(set(ema_vars))
class DepthwiseConv2D(tf.keras.layers.DepthwiseConv2D, tf.layers.Layer):
"""Wrap keras DepthwiseConv2D to tf.layers."""
pass
class Conv2D(tf.layers.Conv2D):
"""Wrapper for Conv2D with specialization for fast inference."""
def _bias_activation(self, outputs):
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias, data_format='NCHW')
if self.activation is not None:
return self.activation(outputs)
return outputs
def _can_run_fast_1x1(self, inputs):
batch_size = inputs.shape.as_list()[0]
return (self.data_format == 'channels_first' and
batch_size == 1 and
self.kernel_size == (1, 1))
def _call_fast_1x1(self, inputs):
# Compute the 1x1 convolution as a matmul.
inputs_shape = tf.shape(inputs)
flat_inputs = tf.reshape(inputs, [inputs_shape[1], -1])
flat_outputs = tf.matmul(
tf.squeeze(self.kernel),
flat_inputs,
transpose_a=True)
outputs_shape = tf.concat([[1, self.filters], inputs_shape[2:]], axis=0)
outputs = tf.reshape(flat_outputs, outputs_shape)
# Handle the bias and activation function.
return self._bias_activation(outputs)
def call(self, inputs):
if self._can_run_fast_1x1(inputs):
return self._call_fast_1x1(inputs)
return super(Conv2D, self).call(inputs)
class EvalCkptDriver(object):
"""A driver for running eval inference.
Attributes:
model_name: str. Model name to eval.
batch_size: int. Eval batch size.
image_size: int. Input image size, determined by model name.
num_classes: int. Number of classes, default to 1000 for ImageNet.
include_background_label: whether to include extra background label.
advprop_preprocessing: whether to use advprop preprocessing.
"""
def __init__(self,
model_name,
batch_size=1,
image_size=224,
num_classes=1000,
include_background_label=False,
advprop_preprocessing=False):
"""Initialize internal variables."""
self.model_name = model_name
self.batch_size = batch_size
self.num_classes = num_classes
self.include_background_label = include_background_label
self.image_size = image_size
self.advprop_preprocessing = advprop_preprocessing
def restore_model(self, sess, ckpt_dir, enable_ema=True, export_ckpt=None):
"""Restore variables from checkpoint dir."""
sess.run(tf.global_variables_initializer())
checkpoint = tf.train.latest_checkpoint(ckpt_dir)
if enable_ema:
ema = tf.train.ExponentialMovingAverage(decay=0.0)
ema_vars = get_ema_vars()
var_dict = ema.variables_to_restore(ema_vars)
ema_assign_op = ema.apply(ema_vars)
else:
var_dict = get_ema_vars()
ema_assign_op = None
tf.train.get_or_create_global_step()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(var_dict, max_to_keep=1)
saver.restore(sess, checkpoint)
if export_ckpt:
if ema_assign_op is not None:
sess.run(ema_assign_op)
saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)
saver.save(sess, export_ckpt)
def build_model(self, features, is_training):
"""Build model with input features."""
del features, is_training
raise ValueError('Must be implemented by subclasses.')
def get_preprocess_fn(self):
raise ValueError('Must be implemented by subclsses.')
def build_dataset(self, filenames, labels, is_training):
"""Build input dataset."""
batch_drop_remainder = False
if 'condconv' in self.model_name and not is_training:
# CondConv layers can only be called with known batch dimension. Thus, we
# must drop all remaining examples that do not make up one full batch.
# To ensure all examples are evaluated, use a batch size that evenly
# divides the number of files.
batch_drop_remainder = True
num_files = len(filenames)
if num_files % self.batch_size != 0:
tf.logging.warn('Remaining examples in last batch are not being '
'evaluated.')
filenames = tf.constant(filenames)
labels = tf.constant(labels)
dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
def _parse_function(filename, label):
image_string = tf.read_file(filename)
preprocess_fn = self.get_preprocess_fn()
image_decoded = preprocess_fn(
image_string, is_training, image_size=self.image_size)
image = tf.cast(image_decoded, tf.float32)
return image, label
dataset = dataset.map(_parse_function)
dataset = dataset.batch(self.batch_size,
drop_remainder=batch_drop_remainder)
iterator = dataset.make_one_shot_iterator()
images, labels = iterator.get_next()
return images, labels
def run_inference(self,
ckpt_dir,
image_files,
labels,
enable_ema=True,
export_ckpt=None):
"""Build and run inference on the target images and labels."""
label_offset = 1 if self.include_background_label else 0
with tf.Graph().as_default(), tf.Session() as sess:
images, labels = self.build_dataset(image_files, labels, False)
probs = self.build_model(images, is_training=False)
if isinstance(probs, tuple):
probs = probs[0]
self.restore_model(sess, ckpt_dir, enable_ema, export_ckpt)
prediction_idx = []
prediction_prob = []
for _ in range(len(image_files) // self.batch_size):
out_probs = sess.run(probs)
idx = np.argsort(out_probs)[::-1]
prediction_idx.append(idx[:5] - label_offset)
prediction_prob.append([out_probs[pid] for pid in idx[:5]])
# Return the top 5 predictions (idx and prob) for each image.
return prediction_idx, prediction_prob
def eval_example_images(self,
ckpt_dir,
image_files,
labels_map_file,
enable_ema=True,
export_ckpt=None):
"""Eval a list of example images.
Args:
ckpt_dir: str. Checkpoint directory path.
image_files: List[str]. A list of image file paths.
labels_map_file: str. The labels map file path.
enable_ema: enable expotential moving average.
export_ckpt: export ckpt folder.
Returns:
A tuple (pred_idx, and pred_prob), where pred_idx is the top 5 prediction
index and pred_prob is the top 5 prediction probability.
"""
classes = json.loads(tf.gfile.Open(labels_map_file).read())
pred_idx, pred_prob = self.run_inference(
ckpt_dir, image_files, [0] * len(image_files), enable_ema, export_ckpt)
for i in range(len(image_files)):
print('predicted class for image {}: '.format(image_files[i]))
for j, idx in enumerate(pred_idx[i]):
print(' -> top_{} ({:4.2f}%): {} '.format(j, pred_prob[i][j] * 100,
classes[str(idx)]))
return pred_idx, pred_prob
def eval_imagenet(self, ckpt_dir, imagenet_eval_glob,
imagenet_eval_label, num_images, enable_ema, export_ckpt):
"""Eval ImageNet images and report top1/top5 accuracy.
Args:
ckpt_dir: str. Checkpoint directory path.
imagenet_eval_glob: str. File path glob for all eval images.
imagenet_eval_label: str. File path for eval label.
num_images: int. Number of images to eval: -1 means eval the whole
dataset.
enable_ema: enable expotential moving average.
export_ckpt: export checkpoint folder.
Returns:
A tuple (top1, top5) for top1 and top5 accuracy.
"""
imagenet_val_labels = [int(i) for i in tf.gfile.GFile(imagenet_eval_label)]
imagenet_filenames = sorted(tf.gfile.Glob(imagenet_eval_glob))
if num_images < 0:
num_images = len(imagenet_filenames)
image_files = imagenet_filenames[:num_images]
labels = imagenet_val_labels[:num_images]
pred_idx, _ = self.run_inference(
ckpt_dir, image_files, labels, enable_ema, export_ckpt)
top1_cnt, top5_cnt = 0.0, 0.0
for i, label in enumerate(labels):
top1_cnt += label in pred_idx[i][:1]
top5_cnt += label in pred_idx[i][:5]
if i % 100 == 0:
print('Step {}: top1_acc = {:4.2f}% top5_acc = {:4.2f}%'.format(
i, 100 * top1_cnt / (i + 1), 100 * top5_cnt / (i + 1)))
sys.stdout.flush()
top1, top5 = 100 * top1_cnt / num_images, 100 * top5_cnt / num_images
print('Final: top1_acc = {:4.2f}% top5_acc = {:4.2f}%'.format(top1, top5))
return top1, top5
| 39.884438 | 91 | 0.664362 |
8efec28a01a88f628caca162b724c7249514af5b | 94,610 | py | Python | MetaScreener/external_sw/mgltools/MGLToolsPckgs/ViewerFramework/basicCommand.py | bio-hpc/metascreener | 6900497629f601c4b6c0c37da26de58ffa221988 | [
"Apache-2.0"
] | 8 | 2021-12-14T21:30:01.000Z | 2022-02-14T11:30:03.000Z | MetaScreener/external_sw/mgltools/MGLToolsPckgs/ViewerFramework/basicCommand.py | bio-hpc/metascreener | 6900497629f601c4b6c0c37da26de58ffa221988 | [
"Apache-2.0"
] | null | null | null | MetaScreener/external_sw/mgltools/MGLToolsPckgs/ViewerFramework/basicCommand.py | bio-hpc/metascreener | 6900497629f601c4b6c0c37da26de58ffa221988 | [
"Apache-2.0"
] | null | null | null | ## Automatically adapted for numpy.oldnumeric Jul 23, 2007 by
############################################################################
#
# Author: Michel F. SANNER
#
# Copyright: M. Sanner TSRI 2000
#
#############################################################################
"""
Module implementing the basic commands that are present when instanciating
a ViewerFramework class or ViewerFramework derived class.
- loadCommandCommand
- loadModuleCommand
- loadMacroCommand
- ExitCommand
- ShellCommand
- UndoCommand
- ResetUndoCommand.
"""
# $Header: /opt/cvs/python/packages/share1.5/ViewerFramework/basicCommand.py,v 1.154 2013/09/06 23:50:17 sanner Exp $
#
# $Id: basicCommand.py,v 1.154 2013/09/06 23:50:17 sanner Exp $
#
import os, sys, subprocess
from mglutil.gui.InputForm.Tk.gui import InputFormDescr
from mglutil.util.callback import CallBackFunction
from mglutil.gui.BasicWidgets.Tk.customizedWidgets import ListChooser, \
LoadButton, kbScrolledListBox
from mglutil.util.packageFilePath import findFilePath, \
findModulesInPackage, getResourceFolderWithVersion
import types, Tkinter, Pmw, os, sys, traceback
from string import join
import tkMessageBox
from ViewerFramework.VFCommand import Command, CommandGUI
import warnings
import string
# Module-level registries shared by the command-loading machinery below:
# commandslist holds the name of every command loaded so far;
# cmd_docslist maps command name -> its docstring (used by the browser GUI).
commandslist=[]
cmd_docslist={}
def findAllVFPackages():
    """Return a dict {packageName: packageDirectory} of packages found on
    sys.path (and the current directory) whose __init__.py contains a line
    starting with 'packageContainsVFCommands'.

    Note: despite the old docstring claiming a list, this has always
    returned a dictionary; callers (e.g. BrowseCommandsCommand) rely on
    the dict interface.
    """
    packages = {}
    for p in ['.'] + sys.path:
        # os.path.isdir implies existence, so the old exists() check is redundant
        if not os.path.isdir(p):
            continue
        for f in os.listdir(p):
            pdir = os.path.join(p, f)
            if not os.path.isdir(pdir):
                continue
            initfile = os.path.join(pdir, '__init__.py')
            if not os.path.exists(initfile):
                continue
            # The marker line flags packages that provide ViewerFramework
            # commands; packages without it are ignored.
            fptr = open(initfile)
            try:
                flagline = [line for line in fptr.readlines()
                            if line.startswith("packageContainsVFCommands")]
            finally:
                # bug fix: the file handle was previously never closed
                fptr.close()
            # first directory found on the path wins, matching the old behavior
            if flagline and f not in packages:
                packages[f] = pdir
    return packages
class NEWUndoCommand(Command):
    """Pops an undo record from the stack and executes it in the
    ViewerFramework's scope.
    \nPackage : ViewerFramework
    \nModule : basicCommand.py
    \nClass : NEWUndoCommand
    \nCommand : Undo
    \nSynopsis:\n
        None <- Undo()
    """
    def validateUserPref(self, value):
        # Accept any value convertible to a non-negative int (0 disables the cap).
        try:
            val = int(value)
            if val >-1:
                return 1
            else:
                return 0
        except:
            return 0
    def onAddCmdToViewer(self):
        # Register the 'Number of Undo' user preference and, when a GUI is
        # present, bind a balloon help to the toolbar Undo button.
        doc = """Number of commands that can be undone"""
        if self.vf.hasGui:
            TButton = self.vf.GUI.menuBars['Toolbar']._frame.toolbarButtonDict['Undo']
            self.balloon = Pmw.Balloon(self.vf.GUI.ROOT)
            self.balloon.bind(TButton, 'Undo')
            self.setLabel()
        self.vf.userpref.add( 'Number of Undo', 100,
                              validateFunc=self.validateUserPref,
                              doc=doc)
    def addUndoCall(self, cmdList, name):
        """Push (cmdList, name) onto the undo stack and trim the stack to the
        'Number of Undo' user preference; dropped entries get a chance to
        clean up via their optional handleForgetUndo hook."""
        #print self.name, "addUndoCall for:", name
        # FIXME handle user pref
        self.cmdStack.append( (cmdList, name) )
        maxLen = self.vf.userpref['Number of Undo']['value']
        if maxLen>0 and len(self.cmdStack)>maxLen:
            forget = self.cmdStack[:-maxLen]
            self.cmdStack = self.cmdStack[-maxLen:]
            for cmdList, name in forget:
                for cmd, args, kw in cmdList:
                    if hasattr(cmd, "handleForgetUndo"):
                        cmd.handleForgetUndo(*args, **kw)
        self.setLabel()
    def doit(self, **kw):
        """
        pop cmdList from stack and execute each cmd in cmdlList
        """
        # NOTE(review): the loop variable kw below shadows the method's **kw;
        # harmless here since the incoming kw is never read after the loop starts.
        stack = self.cmdStack
        if stack:
            cmdList, name = stack.pop()
            ncmds = len(cmdList)
            self._cmdList = ([], name) # this list will gather undoCommands generated during the undo
            for i, (cmd, args, kw) in enumerate(cmdList):
                # inUndo counts down to 0 so commands can tell whether more
                # undo sub-commands are still pending in this batch
                self.inUndo = ncmds-i-1
                cmd( *args, **kw)
            self._cmdList = () # this list will gather undoCommands generated during the undo
            #self.inUndo = True
            #for cmd, args, kw in cmdList:
            #    cmd( *args, **kw)
            #self.inUndo = False
            self.inUndo = -1
        else:
            self.vf.warningMsg('ERROR: Undo called for %s when undo stack is empty'%\
                               self.name)
            #raise RuntimeError('Undo called for %s when undo stack is empty'%\
            #                   self.name)
        self.setLabel()
    def __init__(self, func=None):
        Command.__init__(self, func)
        # cmdStack is a list of tuples providing 1-a list of commands to execute and 2 a name for this operation
        # the list of commands is in the following format [ (cmd, *args, **kw) ]
        self.cmdStack = []
        self.inUndo = -1 # will be 0 or a positive integer while we are executing command(s) to undo last operation.
        self._cmdList = () # this tuple will contain a list that will collect negation of commands during a loop over commands
        # corresponding to an Undo (or Redo in subclassed command)
    def __call__(self, **kw):
        """None<---NEWundo()
        """
        self.doitWrapper(topCommand=0, log=0, busyIdle=1)
    def guiCallback(self, event=None):
        # Menu/toolbar entry point; identical to calling the command directly.
        self.doitWrapper(topCommand=0, log=0, busyIdle=1)
    def setLabel(self):
        """change menu entry label to show command name"""
        # Updates the Undo menu entry and the toolbar button state/balloon to
        # reflect the top of the undo stack; no-op without a GUI.
        if not self.vf.hasGui: return
        cmdmenuEntry = self.GUI.menu[4]['label']
        if len(self.cmdStack)==0:
            state = 'disabled'
            label = 'Undo '
            if self.vf.GUI.menuBars.has_key('Toolbar'):
                TButton = self.vf.GUI.menuBars['Toolbar']._frame.toolbarButtonDict['Undo']
                TButton.disable()
                self.balloon.bind(TButton, label)
                #rebind other functions from toolbarbutton
                TButton.bind("<Enter>", TButton.buttonEnter, '+')
                TButton.bind("<Leave>", TButton.buttonLeave, '+')
                TButton.bind("<ButtonPress-1>", TButton.buttonDown)
                TButton.bind("<ButtonRelease-1>", TButton.buttonUp)
        else:
            state='normal'
            label = 'Undo ' + self.cmdStack[-1][1]
            if self.vf.GUI.menuBars.has_key('Toolbar'):
                TButton = self.vf.GUI.menuBars['Toolbar']._frame.toolbarButtonDict['Undo']
                TButton.enable()
                self.balloon.bind(TButton, label)
                #rebind other functions from toolbarbutton
                TButton.bind("<Enter>", TButton.buttonEnter, '+')
                TButton.bind("<Leave>", TButton.buttonLeave, '+')
                TButton.bind("<ButtonPress-1>", TButton.buttonDown)
                TButton.bind("<ButtonRelease-1>", TButton.buttonUp)
        self.vf.GUI.configMenuEntry(self.GUI.menuButton, cmdmenuEntry,
                                    label=label, state=state)
        self.GUI.menu[4]['label']=label
    def resetCmdStack(self):
        #remove all items from self.cmdStack
        if len(self.cmdStack):
            del(self.cmdStack)
        self.cmdStack = []
        self.setLabel()
class RedoCommand(NEWUndoCommand):
    """Pops a redo cmdList from the stack and executes it in the
    ViewerFramework's scope.  Inherits all stack handling from
    NEWUndoCommand; only the GUI label/toolbar wiring differs.
    \nPackage : ViewerFramework
    \nModule : basicCommand.py
    \nClass : RedoCommand
    \nCommand : Redo
    \nSynopsis:\n
        None <- Redo()
    """
    def setLabel(self):
        """change menu entry label to show command name"""
        # Same logic as NEWUndoCommand.setLabel but targets the 'Redo'
        # toolbar button and menu entry.
        if not self.vf.hasGui: return
        cmdmenuEntry = self.GUI.menu[4]['label']
        if len(self.cmdStack)==0:
            state = 'disabled'
            label = 'Redo '
            if self.vf.GUI.menuBars.has_key('Toolbar'):
                TButton = self.vf.GUI.menuBars['Toolbar']._frame.toolbarButtonDict['Redo']
                TButton.disable()
                self.balloon.bind(TButton, label)
                #rebind other functions from toolbarbutton
                TButton.bind("<Enter>", TButton.buttonEnter, '+')
                TButton.bind("<Leave>", TButton.buttonLeave, '+')
                TButton.bind("<ButtonPress-1>", TButton.buttonDown)
                TButton.bind("<ButtonRelease-1>", TButton.buttonUp)
        else:
            state='normal'
            label = 'Redo ' + self.cmdStack[-1][1]
            if self.vf.GUI.menuBars.has_key('Toolbar'):
                TButton = self.vf.GUI.menuBars['Toolbar']._frame.toolbarButtonDict['Redo']
                TButton.enable()
                self.balloon.bind(TButton, label)
                #rebind other functions from toolbarbutton
                TButton.bind("<Enter>", TButton.buttonEnter, '+')
                TButton.bind("<Leave>", TButton.buttonLeave, '+')
                TButton.bind("<ButtonPress-1>", TButton.buttonDown)
                TButton.bind("<ButtonRelease-1>", TButton.buttonUp)
        self.vf.GUI.configMenuEntry(self.GUI.menuButton, cmdmenuEntry,
                                    label=label, state=state)
        self.GUI.menu[4]['label']=label
class UndoCommand(Command):
    """Pops an undo string from the stack and executes it in the
    ViewerFramework's scope (legacy, exec-based undo implementation).
    \nPackage : ViewerFramework
    \nModule : basicCommand.py
    \nClass : UndoCommand
    \nCommand : Undo
    \nSynopsis:\n
        None <- Undo()
    """
    def __init__(self, func=None):
        Command.__init__(self, func)
        self.ctr = 1 # used to assure unique keys for _undoArgs
        self._undoArgs = {} # this dict is used to save large Python objects
                            # that we do not want to turn into strings
    def get_ctr(self):
        #used to build unique '_undoArg_#' strings
        #cannot simply use len(self_undoArgs)+1
        #because some entries may have been removed
        # for instance, using len(self_undoArgs)+1 only
        # add 1: _undoArg_1, add another: _undoArg_2
        # remove _undoArg_1
        # next addition would replicate _undoArg_2
        if not len(self._undoArgs):
            self.ctr = 1
        else:
            self.ctr += 1
        return self.ctr
    def saveUndoArg(self, arg):
        """Add arg to self._undoArgs under a unique name and returns this name
        """
        name = '_undoArg_%d'%len(self._undoArgs)
        i = 1
        while self._undoArgs.has_key(name):
            name = '_undoArg_%d'%(self.get_ctr())
            i += 1
        self._undoArgs[name] = arg
        return name
    def validateUserPref(self, value):
        # Accept any value convertible to a non-negative int (0 disables the cap).
        try:
            val = int(value)
            if val >-1:
                return 1
            else:
                return 0
        except:
            return 0
    def onAddCmdToViewer(self):
        # NOTE(review): the early 'return' below disables the rest of this
        # method; the GUI/userpref setup is now handled by NEWUndoCommand.
        doc = """Number of commands that can be undone"""
        return
        if self.vf.hasGui:
            TButton = self.vf.GUI.menuBars['Toolbar']._frame.toolbarButtonDict['Undo']
            self.balloon = Pmw.Balloon(self.vf.GUI.ROOT)
            self.balloon.bind(TButton, 'Undo')
            self.setLabel()
        self.vf.userpref.add( 'Number of Undo', 100,
                              validateFunc=self.validateUserPref,
                              doc=doc)
    def doit(self):
        # Pop the top undo string and exec it with self bound to the viewer;
        # any '_undoArg_N' names referenced by the string are supplied from
        # self._undoArgs and removed afterwards so the objects can be freed.
        if len(self.vf.undoCmdStack):
            command = self.vf.undoCmdStack.pop()[0]
            localDict = {'self':self.vf}
            localDict.update( self._undoArgs ) # add undoArgs to local dict
            exec( command, sys.modules['__main__'].__dict__, localDict)
            # remove _undoArg_%d used by this command from self._undoArgs dict
            ind = command.find('_undoArg_')
            while ind != -1 and ind < len(command)-10:
                name = command[ind: ind+9]
                end = ind+9
                # add digits following string
                while command[end].isdigit():
                    name += command[end]
                    end +=1
                nodes = self._undoArgs[name]
                del self._undoArgs[name]
                new_start = ind + len(name)
                ind = command[new_start:].find('_undoArg_')
                if ind!=-1:
                    ind = ind + new_start
            #exec( command, self.vf.__dict__ )
        self.setLabel()
    def guiCallback(self, event=None):
        self.doitWrapper(topCommand=0, log=0, busyIdle=1)
    def __call__(self, **kw):
        """None<---Undo()
        """
        self.doitWrapper(topCommand=0, log=0, busyIdle=1)
    def addEntry(self, undoString, menuString):
        # Push an (exec-string, menu label) pair and trim the stack to the
        # 'Number of Undo' user preference.
        self.vf.undoCmdStack.append( (undoString, menuString) )
        maxLen = self.vf.userpref['Number of Undo']['value']
        if maxLen>0 and len(self.vf.undoCmdStack)>maxLen:
            self.vf.undoCmdStack = self.vf.undoCmdStack[-maxLen:]
        self.vf.undo.setLabel()
    def setLabel(self):
        """change menu entry label to show command name"""
        # NOTE(review): disabled via the early 'return'; label handling moved
        # to NEWUndoCommand.setLabel.
        return
        if not self.vf.hasGui: return
        cmdmenuEntry = self.GUI.menu[4]['label']
        if len(self.vf.undoCmdStack)==0:
            state = 'disabled'
            label = 'Undo '
            if self.vf.GUI.menuBars.has_key('Toolbar'):
                TButton = self.vf.GUI.menuBars['Toolbar']._frame.toolbarButtonDict['Undo']
                TButton.disable()
                #if hasattr(self,'balloon'):
                #    self.balloon.destroy()
                #self.balloon = Pmw.Balloon(self.vf.GUI.ROOT)
                #self.balloon.bind(TButton, 'Undo')
                #rebind other functions from toolbarbutton
                TButton.bind("<Enter>", TButton.buttonEnter, '+')
                TButton.bind("<Leave>", TButton.buttonLeave, '+')
                TButton.bind("<ButtonPress-1>", TButton.buttonDown)
                TButton.bind("<ButtonRelease-1>", TButton.buttonUp)
        else:
            state='normal'
            label = 'Undo ' + self.vf.undoCmdStack[-1][1]
            if self.vf.GUI.menuBars.has_key('Toolbar'):
                TButton = self.vf.GUI.menuBars['Toolbar']._frame.toolbarButtonDict['Undo']
                TButton.enable()
                #if hasattr(self,'balloon'):
                #    self.balloon.destroy()
                #self.balloon = Pmw.Balloon(self.vf.GUI.ROOT)
                self.balloon.bind(TButton, label)
                #rebind other functions from toolbarbutton
                TButton.bind("<Enter>", TButton.buttonEnter, '+')
                TButton.bind("<Leave>", TButton.buttonLeave, '+')
                TButton.bind("<ButtonPress-1>", TButton.buttonDown)
                TButton.bind("<ButtonRelease-1>", TButton.buttonUp)
        self.vf.GUI.configMenuEntry(self.GUI.menuButton, cmdmenuEntry,
                                    label=label, state=state)
        self.GUI.menu[4]['label']=label
class RemoveCommand(Command):
    """Command letting the user remove entries from the list of removable
    (dynamically loaded) commands; changes take effect after a restart."""
    def loadCommands(self):
        # Reload every command recorded in the removableCommands settings
        # (settings[key] is [commands, package]); failures are reported but
        # do not abort the loop.
        for key in self.vf.removableCommands.settings:
            try:
                self.vf.browseCommands.doit(key, self.vf.removableCommands.settings[key][0],
                                    self.vf.removableCommands.settings[key][1])
            except Exception, inst:
                print __file__, inst
    def guiCallback(self):
        # Show a list of removable commands; the selected one is dropped from
        # the persisted settings (takes effect on next start).
        idf = InputFormDescr(title='Remove Command')
        idf.append({'name':'cmd',
                    'widgetType':kbScrolledListBox,
                    'wcfg':{'items':self.vf.removableCommands.settings.keys(),
                            'listbox_exportselection':0,
                            'listbox_selectmode':'extended',
                            'labelpos':'nw',
                            'label_text':'Available commands:',
                            #'dblclickcommand':self.loadCmd_cb,
                            #'selectioncommand':self.displayCmd_cb,
                            },
                    'gridcfg':{'sticky':'wesn', 'row':-1}})
        val = self.vf.getUserInput(idf, modal=1, blocking=1)
        if val:
            self.vf.removableCommands.settings.pop(val['cmd'][0])
            self.vf.removableCommands.saveAllSettings()
            txt = "You need to restart for the changes to take effect."
            tkMessageBox.showinfo("Restart is Needed", txt)
class ResetUndoCommand(Command):
    """Command that clears the viewer's entire undo history.
    \nPackage : ViewerFramework
    \nModule : basicCommand.py
    \nClass : ResetUndoCommand
    \nCommand : resetUndo
    \nSynopsis:\n
        None <- resetUndo()
    """
    def doit(self):
        # Wipe the global undo stack and the stash of saved undo arguments,
        # then refresh the Undo menu/toolbar label.
        vf = self.vf
        vf.undoCmdStack = []
        vf.undo._undoArgs = {}
        vf.undo.setLabel()
        # Clear per-command undo stacks as well (handles 'computeSheet2D').
        for entry in vf.commands:
            if not hasattr(entry, 'command'):
                continue
            entry.undoStack = []
    def __call__(self, **kw):
        """None <- resetUndo()"""
        self.doitWrapper(**kw)
class BrowseCommandsCommand(Command):
    """Command to load dynamically either modules or individual commands
    in the viewer.
    \nPackage : ViewerFramework
    \nModule : basicCommand.py
    \nClass : BrowseCommandsCommand
    \nCommand : browseCommands
    \nSynopsis:\n
        None <-- browseCommands(module, commands=None, package=None, **kw)
    \nRequired Arguments:\n
        module --- name of the module (e.g. colorCommands)
    \nOptional Arguments:\n
        commands --- a list of commands to load
    \npackage --- name of the package to which module belongs (e.g. Pmv, Vision)
    """
    def __init__(self, func=None):
        Command.__init__(self, func)
        # caches: all discovered packages, per-package module lists, and the
        # toggle state of the 'Show all packages' button
        self.allPack = {}
        self.packMod = {}
        self.allPackFlag = False
        # accumulates "Menu->Cascade->Entry" hints shown after loading a module
        self.txtGUI = ""
    def doit(self, module, commands=None, package=None, removable=False):
        # Import <package>.<module> and register either all of its commands
        # (commands is None) or only the named ones.  Updates the module-level
        # commandslist/cmd_docslist registries.
#        if removable:
#            self.vf.removableCommands.settings[module] = [commands, package]
#            self.vf.removableCommands.saveAllSettings()
        # If the package is not specified the default is the first library
        global commandslist,cmd_docslist
        if package is None: package = self.vf.libraries[0]
        importName = package + '.' + module
        try:
            mod = __import__(importName, globals(), locals(),
                             [module])
        except:
            # report the failure in whichever loader form is currently open
            if self.cmdForms.has_key('loadCmds') and \
               self.cmdForms['loadCmds'].f.winfo_toplevel().wm_state() == \
               'normal':
                self.vf.warningMsg("ERROR: Could not load module %s"%module,
                                   parent = self.cmdForms['loadCmds'].root)
            elif self.vf.loadModule.cmdForms.has_key('loadModule') and \
               self.vf.loadModule.cmdForms['loadModule'].f.winfo_toplevel().wm_state() == \
               'normal':
                self.vf.warningMsg("ERROR: Could not load module %s"%module,
                                   parent = self.vf.loadModule.cmdForms['loadModule'].root)
            else:
                self.vf.warningMsg("ERROR: Could not load module %s"%module)
            traceback.print_exc()
            return 'ERROR'
        if commands is None:
            if hasattr(mod,"initModule"):
                if self.vf.hasGui :
                    mod.initModule(self.vf)
                else :
                    #if there is noGUI and if we want to have multiple session of mv
                    #need to instanciate new commands, and not use the global dictionay commandList
                    if hasattr(mod, 'commandList'):
                        for d in mod.commandList:
                            d['cmd'] = d['cmd'].__class__()
                            #print "load all comands ", d['cmd'], d['name'], d['gui']
                            self.vf.addCommand( d['cmd'], d['name'], d['gui'])
                    else : print mod
                if hasattr(mod, 'commandList'):
                    # record names/docstrings and build the menu-path hint text
                    for x in mod.commandList:
                        cmd=x['name']
                        c=x['cmd']
                        #print 'CCCCCCC', cmd
                        if cmd not in cmd_docslist:
                            if hasattr(c,'__doc__'):
                                cmd_docslist[cmd]=c.__doc__
                        if x['gui']:
                            if x['gui'].menuDict:
                                # cap the hint text so the info dialog stays readable
                                if len(self.txtGUI) < 800:
                                    self.txtGUI += "\n"+x['gui'].menuDict['menuButtonName']
                                    if x['gui'].menuDict['menuCascadeName']:
                                        self.txtGUI += "->"+ x['gui'].menuDict['menuCascadeName']
                                    self.txtGUI += "->"+x['gui'].menuDict['menuEntryLabel']
                        if cmd not in commandslist:
                            commandslist.append(cmd)
                #print 'ZZZZZZZZZZZZ', mod
            else:
                if self.cmdForms.has_key('loadCmds') and \
                   self.cmdForms['loadCmds'].f.winfo_toplevel().wm_state() == \
                   'normal':
                    self.vf.warningMsg("ERROR: Could not load module %s"%module,
                                       parent = self.cmdForms['loadCmds'].root)
                elif self.vf.loadModule.cmdForms.has_key('loadModule') and \
                   self.vf.loadModule.cmdForms['loadModule'].f.winfo_toplevel().wm_state() == \
                   'normal':
                    self.vf.warningMsg("ERROR: Could not load module %s"%module,
                                       parent = self.vf.loadModule.cmdForms['loadModule'].root)
                else:
                    self.vf.warningMsg("ERROR: Could not load module %s"%module)
                return "ERROR"
        else:
            # load only the requested commands from the module's commandList
            if not type(commands) in [types.ListType, types.TupleType]:
                commands = [commands,]
            if not hasattr(mod, 'commandList'):
                return
            for cmd in commands:
                d = filter(lambda x: x['name'] == cmd, mod.commandList)
                if len(d) == 0:
                    self.vf.warningMsg("Command %s not found in module %s.%s"%
                                       (cmd, package, module))
                    continue
                d = d[0]
                if cmd not in cmd_docslist:
                    if hasattr(d['cmd'],'__doc__'):
                        cmd_docslist[cmd]=d['cmd'].__doc__
                if cmd not in commandslist:
                    commandslist.append(cmd)
                if not self.vf.hasGui :
                    #if there is noGUI and if we want to have multiple session of mv
                    #need to instanciate new commands, and not use the global dictionay commandList
                    #print "load specific comands ", d['cmd'], d['name'], d['gui']
                    d['cmd'] = d['cmd'].__class__()
                self.vf.addCommand(d['cmd'], d['name'], d['gui'])
    def __call__(self, module, commands=None, package=None, **kw):
        """None<---browseCommands(module, commands=None, package=None, **kw)
        \nmodule --- name of the module(eg:colorCommands)
        \ncommands --- one list of commands to load
        \npackage --- name of the package to which module belongs(eg:Pmv,Vision)
        """
        kw['commands'] = commands
        kw['package'] = package
        apply(self.doitWrapper, (module,), kw )
    def buildFormDescr(self, formName):
        # Build the three-pane 'Load Modules and Commands' form:
        # packages | modules | commands, plus a collapsible description area.
        import Tkinter
        if not formName == 'loadCmds': return
        idf = InputFormDescr(title='Load Modules and Commands')
        pname = self.vf.libraries
        #when Pvv.startpvvCommnads is loaded some how Volume.Pvv is considered
        #as seperate package and is added to packages list in the widget
        #To avoid this packages having '.' are removed
        for p in pname:
            if '.' in p:
                ind = pname.index(p)
                del pname[ind]
        idf.append({'name':'packList',
                    'widgetType':kbScrolledListBox,
                    'wcfg':{'items':pname,
                            #'defaultValue':pname[0],
                            'listbox_exportselection':0,
                            'labelpos':'nw',
                            'label_text':'Select a package:',
                            #'dblclickcommand':self.loadMod_cb,
                            'selectioncommand':self.displayMod_cb
                            },
                    'gridcfg':{'sticky':'wesn'}})
        idf.append({'name':'modList',
                    'widgetType':kbScrolledListBox,
                    'wcfg':{'items':[],
                            'listbox_exportselection':0,
                            'labelpos':'nw',
                            'label_text':'Select a module:',
                            #'dblclickcommand':self.loadMod_cb,
                            'selectioncommand':self.displayCmds_cb,
                            },
                    'gridcfg':{'sticky':'wesn', 'row':-1}})
        idf.append({'name':'cmdList',
                    'widgetType':kbScrolledListBox,
                    'wcfg':{'items':[],
                            'listbox_exportselection':0,
                            'listbox_selectmode':'extended',
                            'labelpos':'nw',
                            'label_text':'Available commands:',
                            #'dblclickcommand':self.loadCmd_cb,
                            'selectioncommand':self.displayCmd_cb,
                            },
                    'gridcfg':{'sticky':'wesn', 'row':-1}})
#        idf.append({'name':'docbutton',
#                    'widgetType':Tkinter.Checkbutton,
#                    #'parent':'DOCGROUP',
#                    'defaultValue':0,
#                    'wcfg':{'text':'Show documentation',
#                            'onvalue':1,
#                            'offvalue':0,
#                            'command':self.showdoc_cb,
#                            'variable':Tkinter.IntVar()},
#                    'gridcfg':{'sticky':'nw','columnspan':3}})
        idf.append({'name':'DOCGROUP',
                    'widgetType':Pmw.Group,
                    'container':{'DOCGROUP':"w.interior()"},
                    'collapsedsize':0,
                    'wcfg':{'tag_text':'Description'},
                    'gridcfg':{'sticky':'wnse', 'columnspan':3}})
        idf.append({'name':'doclist',
                    'widgetType':kbScrolledListBox,
                    'parent':'DOCGROUP',
                    'wcfg':{'items':[],
                            'listbox_exportselection':0,
                            'listbox_selectmode':'extended',
                            },
                    'gridcfg':{'sticky':'wesn', 'columnspan':3}})
        idf.append({'name':'allPacks',
                    'widgetType':Tkinter.Button,
                    'wcfg':{'text':'Show all packages',
                            'command':self.allPacks_cb},
                    'gridcfg':{'sticky':'ew'}})
        idf.append({'name':'loadMod',
                    'widgetType':Tkinter.Button,
                    'wcfg':{'text':'Load selected module',
                            'command':self.loadMod_cb},
                    'gridcfg':{'sticky':'ew', 'row':-1}})
#        idf.append({'name':'loadCmd',
#                    'widgetType':Tkinter.Button,
#                    'wcfg':{'text':'Load Command',
#                            'command':self.loadCmd_cb},
#                    'gridcfg':{'sticky':'ew', 'row':-1}})
        idf.append({'name':'dismiss',
                    'widgetType':Tkinter.Button,
                    'wcfg':{'text':'Dismiss',
                            'command':self.dismiss_cb},
                    'gridcfg':{'sticky':'ew', 'row':-1}})
#        idf.append({'name':'dismiss',
#                    'widgetType':Tkinter.Button,
#                    'wcfg':{'text':'DISMISS',
#                            'command':self.dismiss_cb,
#                            },
#                    'gridcfg':{'sticky':Tkinter.E+Tkinter.W,'columnspan':3}})
        return idf
    def guiCallback(self):
        # Show the loader form with a busy cursor while packages are scanned.
        self.vf.GUI.ROOT.config(cursor='watch')
        self.vf.GUI.ROOT.update()
        if self.allPack == {}:
            self.allPack = findAllVFPackages()
        val = self.showForm('loadCmds', force=1,modal=0,blocking=0)
        ebn = self.cmdForms['loadCmds'].descr.entryByName
#        docb=ebn['docbutton']['widget']
#        var=ebn['docbutton']['wcfg']['variable'].get()
#        if var==0:
#            dg=ebn['DOCGROUP']['widget']
#            dg.collapse()
        self.vf.GUI.ROOT.config(cursor='')
    def dismiss_cb(self, event=None):
        # Hide (do not destroy) the loader form.
        self.cmdForms['loadCmds'].withdraw()
    def allPacks_cb(self, event=None):
        # Toggle the package list between the viewer's default libraries and
        # every VF package found on sys.path; clears dependent panes.
        ebn = self.cmdForms['loadCmds'].descr.entryByName
        packW = ebn['packList']['widget']
        if not self.allPackFlag:
            packName = self.allPack.keys()
            packW.setlist(packName)
            ebn['allPacks']['widget'].configure(text='Show default packages')
            self.allPackFlag = True
        else:
            packName = self.vf.libraries
            packW.setlist(packName)
            ebn['allPacks']['widget'].configure(text='Show all packages')
            self.allPackFlag = False
        ebn['modList']['widget'].clear()
        ebn['cmdList']['widget'].clear()
#    def showdoc_cb(self,event=None):
#        #when a show documentation is on and a module is selected then
#        #expands dg else dg is collapsed
#        ebn = self.cmdForms['loadCmds'].descr.entryByName
#        docb=ebn['docbutton']['widget']
#        var=ebn['docbutton']['wcfg']['variable'].get()
#        dg=ebn['DOCGROUP']['widget']
#        docw=ebn['doclist']['widget']
#        packW = ebn['packList']['widget']
#        psel=packW.getcurselection()
#        if var==0:
#            dg.collapse()
#        if var==1 and psel:
#            if docw.size()>0:
#                dg.expand()
    def displayMod_cb(self, event=None):
        # Package selected: scan it for command modules, fill the module pane
        # and show the package docstring in the description pane.
        #print "displayMod_cb"
#        c = self.cmdForms['loadCmds'].mf.cget('cursor')
#        self.cmdForms['loadCmds'].mf.configure(cursor='watch')
#        self.cmdForms['loadCmds'].mf.update_idletasks()
        ebn = self.cmdForms['loadCmds'].descr.entryByName
#        docb=ebn['docbutton']['widget']
#        var=ebn['docbutton']['wcfg']['variable'].get()
#        dg = ebn['DOCGROUP']['widget']
#        dg.collapse()
        packW = ebn['packList']['widget']
        packs = packW.getcurselection()
        if len(packs) == 0:
            return
        packName = packs[0]
        if not self.packMod.has_key(packName):
            package = self.allPack[packName]
            self.packMod[packName] = findModulesInPackage(package,"^def initModule",fileNameFilters=['Command'])
        self.currentPack = packName
        modNames = []
        for key, value in self.packMod[packName].items():
            pathPack = key.split(os.path.sep)
            if pathPack[-1] == packName:
                newModName = map(lambda x: x[:-3], value)
                #for mname in newModName:
                    #if "Command" not in mname :
                        #ind = newModName.index(mname)
                        #del newModName[ind]
                modNames = modNames+newModName
            else:
                # module lives in a sub-package: prefix its dotted path
                pIndex = pathPack.index(packName)
                prefix = join(pathPack[pIndex+1:], '.')
                newModName = map(lambda x: "%s.%s"%(prefix, x[:-3]), value)
                #for mname in newModName:
                    #if "Command" not in mname :
                        #ind = newModName.index(mname)
                        #del newModName[ind]
                modNames = modNames+newModName
        modNames.sort()
        modW = ebn['modList']['widget']
        modW.setlist(modNames)
        # and clear contents in self.libraryGUI
        cmdW = ebn['cmdList']['widget']
        cmdW.clear()
        m = __import__(packName, globals(), locals(),[])
        d = []
        docstring=m.__doc__
        #d.append(m.__doc__)
        docw = ebn['doclist']['widget']
        docw.clear()
        #formatting documentation.
        if docstring!=None :
            if '\n' in docstring:
                x = string.split(docstring,"\n")
                for i in x:
                    if i !='':
                        d.append(i)
                if len(d)>8:
                    docw.configure(listbox_height=8)
                else:
                    docw.configure(listbox_height=len(d))
            else:
                x = string.split(docstring," ")
                #formatting documenation
                if len(x)>10:
                    docw.configure(listbox_height=len(x)/10)
                else:
                    docw.configure(listbox_height=1)
            docw.setlist(d)
#        self.cmdForms['loadCmds'].mf.configure(cursor=c)
        #when show documentation on after selcting a package
        #dg is expanded to show documenttation
        #if var==1 and docw.size()>0:
        ##if docw.size()>0:
        ##    dg.expand()
    def displayCmds_cb(self, event=None):
        # Module selected: list its commands and show the module docstring.
        #print "displayCmds_cb"
        global cmd_docslist
        self.cmdForms['loadCmds'].mf.update_idletasks()
        ebn = self.cmdForms['loadCmds'].descr.entryByName
        dg = ebn['DOCGROUP']['widget']
        dg.collapse()
        cmdW = ebn['cmdList']['widget']
        cmdW.clear()
#        docb=ebn['docbutton']['widget']
#        var=ebn['docbutton']['wcfg']['variable'].get()
        modName = ebn['modList']['widget'].getcurselection()
        if modName == (0 or ()): return
        else:
            modName = modName[0]
        importName = self.currentPack + '.' + modName
        try:
            m = __import__(importName, globals(), locals(),['commandList'])
        except:
            return
        if not hasattr(m, 'commandList'):
            return
        cmdNames = map(lambda x: x['name'], m.commandList)
        cmdNames.sort()
        if modName:
            self.var=1
            d =[]
            docstring =m.__doc__
            import string
            docw = ebn['doclist']['widget']
            docw.clear()
            if docstring!=None :
                if '\n' in docstring:
                    x = string.split(docstring,"\n")
                    for i in x:
                        if i !='':
                            d.append(i)
                    #formatting documenation
                    if len(d)>8:
                        docw.configure(listbox_height=8)
                    else:
                        docw.configure(listbox_height=len(d))
                else:
                    d.append(docstring)
                    x = string.split(docstring," ")
                    #formatting documenation
                    if len(x)>10:
                        docw.configure(listbox_height=len(x)/10)
                    else:
                        docw.configure(listbox_height=1)
                docw.setlist(d)
        CmdName=ebn['cmdList']['widget'].getcurselection()
        cmdW.setlist(cmdNames)
        #when show documentation is on after selcting a module or a command
        #dg is expanded to show documenttation
        #if var==1 and docw.size()>0:
        if docw.size()>0:
            dg.expand()
    def displayCmd_cb(self, event=None):
        # Command selected: show its docstring (caching it in cmd_docslist)
        # while keeping the command list selection in sync.
        #print "displayCmd_cb"
        global cmd_docslist
        self.cmdForms['loadCmds'].mf.update_idletasks()
        ebn = self.cmdForms['loadCmds'].descr.entryByName
        dg = ebn['DOCGROUP']['widget']
        dg.collapse()
#        docb=ebn['docbutton']['widget']
#        var=ebn['docbutton']['wcfg']['variable'].get()
        modName = ebn['modList']['widget'].getcurselection()
        if modName == (0 or ()): return
        else:
            modName = modName[0]
        importName = self.currentPack + '.' + modName
        try:
            m = __import__(importName, globals(), locals(),['commandList'])
        except:
            self.warningMsg("ERROR: Cannot find commands for %s"%modName)
            return
        if not hasattr(m, 'commandList'):
            return
        cmdNames = map(lambda x: x['name'], m.commandList)
        cmdNames.sort()
        if modName:
            self.var=1
            d =[]
            docstring =m.__doc__
            import string
            docw = ebn['doclist']['widget']
            docw.clear()
            if docstring!=None :
                if '\n' in docstring:
                    x = string.split(docstring,"\n")
                    for i in x:
                        if i !='':
                            d.append(i)
                    #formatting documenation
                    if len(d)>8:
                        docw.configure(listbox_height=8)
                    else:
                        docw.configure(listbox_height=len(d))
                else:
                    d.append(docstring)
                    x = string.split(docstring," ")
                    #formatting documenation
                    if len(x)>10:
                        docw.configure(listbox_height=len(x)/10)
                    else:
                        docw.configure(listbox_height=1)
                docw.setlist(d)
        cmdW = ebn['cmdList']['widget']
        CmdName=ebn['cmdList']['widget'].getcurselection()
        cmdW.setlist(cmdNames)
        if len(CmdName)!=0:
            for i in m.commandList:
                if i['name']==CmdName[0]:
                    c = i['cmd']
            if CmdName[0] in cmdNames:
                ind= cmdNames.index(CmdName[0])
                cmdW.selection_clear()
                cmdW.selection_set(ind)
                d =[]
                docstring=c.__doc__
                docw = ebn['doclist']['widget']
                docw.clear()
                if CmdName[0] not in cmd_docslist.keys():
                    cmd_docslist[CmdName[0]]=d
                import string
                if docstring!=None :
                    if '\n' in docstring:
                        x = string.split(docstring,"\n")
                        for i in x:
                            if i !='':
                                d.append(i)
                        if len(d)>8:
                            docw.configure(listbox_height=8)
                        else:
                            docw.configure(listbox_height=len(d))
                    else:
                        d.append(docstring)
                        x = string.split(docstring," ")
                        if len(x)>10:
                            docw.configure(listbox_height=len(x)/10)
                        else:
                            docw.configure(listbox_height=1)
                    docw.setlist(d)
        #when show documentation is on after selcting a module or a command
        #dg is expanded to show documenttation
        #if var==1 and docw.size()>0:
        if docw.size()>0:
            dg.expand()
    def loadMod_cb(self, event=None):
        # 'Load selected module' button: load it and report the menu path(s)
        # where its commands became available.
        ebn = self.cmdForms['loadCmds'].descr.entryByName
        selMod = ebn['modList']['widget'].getcurselection()
        if len(selMod)==0: return
        else:
            self.txtGUI = ""
            apply(self.doitWrapper, ( selMod[0],),
                  {'commands':None, 'package':self.currentPack, 'removable':True})
            self.dismiss_cb(None)
            if self.txtGUI:
                self.txtGUI = "\n Access this command via:\n"+self.txtGUI
            tkMessageBox.showinfo("Load Module", selMod[0]+" loaded successfully!\n"+self.txtGUI)
#    def loadCmd_cb(self, event=None):
#        ebn = self.cmdForms['loadCmds'].descr.entryByName
#        selCmds = ebn['cmdList']['widget'].getcurselection()
#        selMod = ebn['modList']['widget'].getcurselection()
#        if len(selCmds)==0: return
#        else:
#            apply(self.doitWrapper, (selMod[0],), {'commands':selCmds,
#                                                   'package':self.currentPack})
class loadModuleCommand(Command):
"""Command to load dynamically modules to the Viewer import the file called name.py and execute the function initModule defined in that file Raises a ValueError exception if initModule is not defined
\nPackage : ViewerFramework
\nModule : basicCommand.py
\nClass : loadModuleCommand
\nCommand : loadModule
\nSynopsis:\n
None<--loadModule(filename, package=None, **kw)
\nRequired Arguements:\n
filename --- name of the module
\nOptional Arguements:\n
package --- name of the package to which filename belongs
"""
active = 0
def doit(self, filename, package):
# This is NOT called because we call browseCommand()"
if package is None:
_package = filename
else:
_package = "%s.%s"%(package, filename)
try:
mod = __import__( _package, globals(), locals(), ['initModule'])
if hasattr(mod, 'initModule') or not callable(mod.initModule):
mod.initModule(self.vf)
else:
self.vf.warningMsg('module %s has not initModule function')
except ImportError:
self.vf.warningMsg('module %s could not be imported'%_package)
## if package is None:
## _package = filename
## else:
## _package = "%s.%s"%(package, filename)
## module = self.vf.tryto( __import__ , _package, globals(), locals(),
## [filename])
## if module=='ERROR':
## print '\nWARNING: Could not load module %s' % filename
## return
def __call__(self, filename, package=None, **kw):
"""None<---loadModule(filename, package=None, **kw)
\nRequired Arguements:\n
filename --- name of the module
\nOptional Arguements:\n
package --- name of the package to which filename belongs
"""
if package==None:
package=self.vf.libraries[0]
if not kw.has_key('redraw'):
kw['redraw'] = 0
kw['package'] = package
apply(self.vf.browseCommands, (filename,), kw)
#apply( self.doitWrapper, (filename, package), kw )
def loadModule_cb(self, event=None):
# c = self.cmdForms['loadModule'].mf.cget('cursor')
# self.cmdForms['loadModule'].mf.configure(cursor='watch')
# self.cmdForms['loadModule'].mf.update_idletasks()
ebn = self.cmdForms['loadModule'].descr.entryByName
moduleName = ebn['Module List']['widget'].get()
package = ebn['package']['widget'].get()
if moduleName:
self.vf.browseCommands(moduleName[0], package=package, redraw=0)
# self.cmdForms['loadModule'].mf.configure(cursor=c)
def loadModules(self, package, library=None):
modNames = []
doc = []
self.filenames={}
self.allPack={}
self.allPack=findAllVFPackages()
if package is None: return [], []
if not self.filenames.has_key(package):
pack=self.allPack[package]
#finding modules in a package
self.filenames[pack] =findModulesInPackage(pack,"^def initModule",fileNameFilters=['Command'])
# dictionary of files keys=widget, values = filename
for key, value in self.filenames[pack].items():
pathPack = key.split(os.path.sep)
if pathPack[-1] == package:
newModName = map(lambda x: x[:-3], value)
#for mname in newModName:
#if not modulename has Command in it delete from the
#modules list
#if "Command" not in mname :
#ind = newModName.index(mname)
#del newModName[ind]
#if "Command" in mname :
if hasattr(newModName,"__doc__"):
doc.append(newModName.__doc__)
else:
doc.append(None)
modNames = modNames + newModName
else:
pIndex = pathPack.index(package)
prefix = join(pathPack[pIndex+1:], '.')
newModName = map(lambda x: "%s.%s"%(prefix, x[:-3]), value)
#for mname in newModName:
#if not modulename has Command in it delete from the
#modules list
#if "Command" not in mname :
#ind = newModName.index(mname)
#del newModName[ind]
if hasattr(newModName,"__doc__"):
doc.append(newModName.__doc__)
else:
doc.append(None)
modNames = modNames + newModName
modNames.sort()
return modNames, doc
def package_cb(self, event=None):
ebn = self.cmdForms['loadModule'].descr.entryByName
pack = ebn['package']['widget'].get()
names, docs = self.loadModules(pack)
w = ebn['Module List']['widget']
w.clear()
for n,d in map(None, names, docs):
w.insert('end', n, d)
def buildFormDescr(self, formName):
    """create the cascade menu for selecting modules to be loaded.

    Returns an InputFormDescr for the 'loadModule' form (package
    combobox, module list chooser, Load/Dismiss buttons), or None for
    any other form name."""
    if not formName == 'loadModule':return
    ifd = InputFormDescr(title='Load command Modules')
    names, docs = self.loadModules(self.vf.libraries[0])
    entries = map(lambda x: (x, None), names)
    pname=self.vf.libraries
    # BUGFIX: the original deleted entries from 'pname' while iterating
    # it, which skips the element following each deletion.  Iterate over
    # a copy instead; the in-place mutation of self.vf.libraries
    # (dropping dotted package names) is deliberate and preserved.
    for p in pname[:]:
        if '.' in p:
            pname.remove(p)
    ifd.append({
        'name':'package',
        'widgetType': Pmw.ComboBox,
        'defaultValue': pname[0],
        'wcfg':{ 'labelpos':'nw', 'label_text':'Package:',
                 'selectioncommand': self.package_cb,
                 'scrolledlist_items':pname
                 },
        'gridcfg':{'sticky':'ew', 'padx':2, 'pady':1}
        })
    ifd.append({'name': 'Module List',
                'widgetType':ListChooser,
                'wcfg':{
                    'title':'Choose a module',
                    'entries': entries,
                    'lbwcfg':{'width':27,'height':10},
                    'command':self.loadModule_cb,
                    'commandEvent':"<Double-Button-1>"
                    },
                'gridcfg':{'sticky':Tkinter.E+Tkinter.W}
                })
    ifd.append({'name': 'Load Module',
                'widgetType':Tkinter.Button,
                'wcfg':{'text':'Load Module',
                        'command': self.loadModule_cb,
                        'bd':6},
                'gridcfg':{'sticky':Tkinter.E+Tkinter.W},
                })
    ifd.append({'widgetType':Tkinter.Button,
                'wcfg':{'text':'Dismiss',
                        'command': self.Dismiss_cb},
                'gridcfg':{'sticky':Tkinter.E+Tkinter.W}})
    return ifd
def Dismiss_cb(self):
    """Hide the 'loadModule' form and mark the command as inactive so
    guiCallback can post it again."""
    form = self.cmdForms['loadModule']
    form.withdraw()
    self.active = 0
def guiCallback(self, event=None):
    """Post the 'loadModule' form unless it is already shown; route the
    window-manager close button to Dismiss_cb."""
    if self.active:
        return
    self.active = 1
    form = self.showForm('loadModule', force=1, modal=0, blocking=0)
    form.root.protocol('WM_DELETE_WINDOW', self.Dismiss_cb)
class loadCommandCommand(loadModuleCommand):
"""Command to load dynamically individual commands in the Viewer.
\nPackage : ViewerFramework
\nModule : basicCommand.py
\nClass : loadCommandCommand
\nCommand : loadCommand
\nSynopsis:\n
None <- loadCommand(moduleName, commandsName, package=None,
gui=None)
\nRequired Arguements:\n
moduleName --- name of the module to be loaded
commandsName --- name of the Command or list of Commands
\nOptional Arguements:\n
package --- name of the package to which filename belongs
"""
active = 0
def doit(self, module, commands, package=None, gui=None):
"""Load a command instance with the given alias and gui"""
if package is None:
package = self.vf.libraries[0]
if not type(commands)!=types.ListType:
commands = [commands,]
for command in commands:
cmd, name, gui = self.getDefaults(package, module, command)
if cmd is None:
print 'Could not add %s.%s.%s'%(package, module, command)
continue
self.vf.addCommand( cmd, name, gui )
def guiCallback(self, event=None):
if self.active: return
self.active = 1
form = self.showForm('loadCommand', force=1,modal=0,blocking=0)
form.root.protocol('WM_DELETE_WINDOW',self.Dismiss_cb)
def package_cb(self, event=None):
# Get Package.
ebn = self.cmdForms['loadCommand'].descr.entryByName
pack = ebn['package']['widget'].get()
# Get Modules for the new package and update the listChooser.
names, docs = self.loadModules(pack)
mw = ebn['Module List']['widget']
mw.clear()
for n in names:
mw.insert('end',n)
mw.set(names[0])
# Get Commands for the first module and update the listChooser
cw = ebn['Command List']['widget']
cw.clear()
cmds = self.getModuleCmds(modName=names[0],
package=pack)
if cmds==None:
return
for cmd in map(lambda x: x[0],cmds):
cw.insert('end', cmd)
def Dismiss_cb(self):
self.cmdForms['loadCommand'].withdraw()
self.active = 0
def __call__(self, moduleName, commandsName,
package=None, gui=None, **kw):
"""None <- loadCommand(moduleName, commandsName, package=None,
gui=None)
\nRequired Arguements:\n
moduleName --- name of the module to be loaded
commandsName --- name of the Command or list of Commands
\nOptional Arguements:\n
package --- name of the package to which filename belongs
"""
kw['package'] = package
#kw['gui'] = gui
#apply(self.doitWrapper, (moduleName, commandsName), kw)
kw['commands']=commandsName
apply(self.vf.browseCommands, (moduleName,), kw)
def getDefaults(self, package, filename, commandName):
if package is None: _package = filename
else: _package = "%s.%s"%(package, filename)
mod = self.vf.tryto(__import__,_package, globals(), locals(),
[filename])
if mod=='ERROR':
print '\nERROR: Could not load module %s.%s' % (_package,
filename)
return None,None,None
for d in mod.commandList:
if d['name']==commandName:
break
if d['name']!=commandName:
print '\nERROR: command %s not found in module %s.%s\n' % \
(commandName, package, filename)
return None,None,None
return d['cmd'], d['name'], d['gui']
def loadCmd_cb(self, event=None):
# c = self.cmdForms['loadCommand'].mf.cget('cursor')
# self.cmdForms['loadCommand'].mf.configure(cursor='watch')
# self.cmdForms['loadCommand'].mf.update_idletasks()
ebn = self.cmdForms['loadCommand'].descr.entryByName
module = ebn['Module List']['widget'].get()
commands = ebn['Command List']['widget'].get()
package = ebn['package']['widget'].get()
if commands:
kw = {'package': package}
#apply(self.doitWrapper, (module, commands), kw)
kw['commands'] = commands
apply(self.vf.browseCommands, (module[0],), kw)
# self.cmdForms['loadCommand'].mf.configure(cursor=c)
def getModuleCmds(self, modName=None, package=None):
""" Callback method of the module chooser to get the corresponding
commands:
"""
filename = modName
if package is None: _package = filename
else: _package = "%s.%s"%(package, filename)
try:
mod = __import__(_package,globals(),locals(),['commandList'])
except:
self.vf.warningMsg("ERROR: Could not load module %s "%filename)
return "ERROR"
if hasattr(mod,"initModule"):
mod.initModule(self.vf)
else:
self.vf.warningMsg("ERROR: Could not load module %s"%filename)
return
if not hasattr(mod, 'commandList'):
cmdEntries = []
else:
cmdsName = map(lambda x: x['name'], mod.commandList)
cmdsName.sort()
cmdEntries = map(lambda x: (x,None), cmdsName)
return cmdEntries
def modChooser_cb(self, event=None):
"""CallBack method that gets called when clicking on an entry
of the mocule cjooser."""
ebn = self.cmdForms['loadCommand'].descr.entryByName
modChooser = ebn['Module List']['widget']
filename = modChooser.get()[0]
package = ebn['package']['widget'].get()
cmdEntries = self.getModuleCmds(modName=filename, package=package)
cmdChooser = ebn['Command List']['widget']
cmdChooser.clear()
#cmdEntries should be a list if there are no commands for a module then getModuleCmds returns None or Error in that case cmdEntries is made equal to emptylist
if cmdEntries=="ERROR" or cmdEntries==None:
cmdEntries=[]
map(cmdChooser.add, cmdEntries)
def buildFormDescr(self, formName):
"""Create the pulldown menu and Listchooser to let for the
selection of commands."""
if not formName == 'loadCommand': return
ifd = InputFormDescr(title='Load Individual Commands')
moduleNames, docs = self.loadModules(self.vf.libraries[0])
moduleNames.sort()
moduleEntries = map(lambda x: (x, None), moduleNames)
pname = self.vf.libraries
for p in pname:
if '.' in p:
ind = pname.index(p)
del pname[ind]
ifd.append({
'name':'package',
'widgetType': Pmw.ComboBox,
'defaultValue': pname[0],
'wcfg':{ 'labelpos':'nw', 'label_text':'Package:',
'selectioncommand': self.package_cb,
'scrolledlist_items':pname
},
'gridcfg':{'columnspan':2,'sticky':'ew', 'padx':2, 'pady':1}
})
ifd.append({'name': 'Module List',
'widgetType':ListChooser,
'wcfg':{'title':'Choose a module',
'entries': moduleEntries,
'lbwcfg':{ 'width':25,
'height':10,
'exportselection':0},
'command':self.modChooser_cb,
},
'gridcfg':{'sticky':'ew'}
} )
CmdEntries = []
ifd.append({'name': 'Command List',
'widgetType':ListChooser,
'wcfg':{'title':'Choose a command',
'mode':'multiple',
'entries': CmdEntries,
'command':self.loadCmd_cb,
'commandEvent':"<Double-Button-1>",
'lbwcfg':{'width':25,'height':10,
'exportselection':0}
},
'gridcfg':{'row':-1,'sticky':'we'}
} )
ifd.append({'name': 'Load Command',
'widgetType':Tkinter.Button,
'wcfg':{'text':'Load Command',
'bd':6, 'command': self.loadCmd_cb},
'gridcfg':{'columnspan':2,'sticky':'ew'},
})
ifd.append({'widgetType':Tkinter.Button,
'wcfg':{'text':'Dismiss',
'command': self.Dismiss_cb},
'gridcfg':{'columnspan':2,'sticky':'ew'}})
return ifd
class loadMacroCommand(Command):
    """Command to load dynamically macro commands.
    \nPackage : ViewerFramework
    \nModule : basicCommand.py
    \nClass : loadMacroCommand
    \nCommand : loadMacro
    \nSynopsis:\n
        None<---loadMacro(macroName, macroFile, menuBar='menuRoot',
                          menuButton='Macros', menuEntry=None, cascade=None, **kw)
    """
    active = 0

    def getMacros(self, file):
        """load all macro functions from a given file.

        Returns three parallel lists: function names, function objects and
        docstrings."""
        names = []
        macros = []
        doc = []
        _dir, file = os.path.split(file)
        if file[-3:]=='.py': file = file[:-3]
        import sys
        # Make the macro file importable, import it, then restore sys.path.
        sys.path.insert(0, _dir)
        m = __import__(file, globals(), locals(), [])
        sys.path = sys.path[1:]
        # Macros refer to the viewer as 'self'; inject it into the module.
        setattr(m, 'self', self.vf)
        import types
        for key, value in m.__dict__.items():
            if type(value) == types.FunctionType:
                names.append(key)
                macros.append(value)
                doc.append(value.__doc__)
        return names, macros, doc

    def loadMacLib_cb(self, filename):
        """Call back function for 'Open Macro library' button"""
        ebn = self.cmdForms['loadMacro'].descr.entryByName
        ebn['openMacLib']['widget'].button.configure(relief='sunken')
        names, macros, docs = self.getMacros(filename)
        # Cache the parsed library so loadMacro_cb/doit can reuse it.
        self.macroFile = filename
        self.macNames = names
        self.macMacros = macros
        self.macDoc = docs
        # Get a handle to the listchooser widget
        lc = ebn['macros']['widget']
        lc.clear()
        if len(names) == len(docs):
            entries = map(lambda x, y: (x, y), names, docs)
        else:
            entries = map(lambda x: (x, None), names)
        map(lc.add, entries)
        ebn['openMacLib']['widget'].button.configure(relief='raised')
        # set cascade name to library Name - "mac"
        w = ebn['cascade']['widget']
        w.delete(0, 'end')
        w.insert(0, os.path.split(filename)[1][:-3])

    def setDefaultEntryName(self, event=None):
        """Call back function for the listchooser showing macros.
        gets the name of the currently selected macro and puts it in the
        entry type in"""
        # enable add button
        ebn = self.cmdForms['loadMacro'].descr.entryByName
        ebn['loadMacro']['widget'].configure(state='normal')
        # put default name into name entry
        val = ebn['macros']['widget'].get()
        w = ebn['menuentry']['widget']
        w.delete(0, 'end')
        w.insert(0, val[0])

    def loadMacro_cb(self, event=None):
        """Load the macro currently selected in the list chooser."""
        ebn = self.cmdForms['loadMacro'].descr.entryByName
        bar = ebn['menubar']['widget'].get()
        menub = ebn['menubutton']['widget'].get()
        name = ebn['menuentry']['widget'].get()
        cascade = ebn['cascade']['widget'].get()
        if cascade=='': cascade=None
        lc = ebn['macros']['widget']
        macNames = lc.get()
        # BUGFIX: 'macName' used to be referenced even when the selection
        # was empty, raising NameError; bail out instead.
        if len(macNames) == 0:
            return
        macName = macNames[0]
        self.doitWrapper(macName, self.macroFile, menuBar=bar,
                         menuButton=menub,
                         menuEntry=name, cascade=cascade)
        ebn['openMacLib']['widget'].button.configure(relief='raised')

    def dismiss_cb(self):
        """Hide the 'loadMacro' form and allow it to be posted again."""
        self.cmdForms['loadMacro'].withdraw()
        self.active = 0

    def buildFormDescr(self, formName):
        """Build the 'loadMacro' input form (library opener, macro list
        chooser, menu placement entries, Load/Dismiss buttons)."""
        if not formName=='loadMacro': return None
        ifd = InputFormDescr(title='Load Macros')
        # BUGFIX: this used to read "if len(self.vf.libraries) is None:",
        # which is always false since len() returns an int; the intent is
        # to fall back to ViewerFramework when no library is registered.
        if len(self.vf.libraries) == 0:
            modu = __import__('ViewerFramework')
        else:
            modu = __import__(self.vf.libraries[0])
        idir = os.path.split(modu.__file__)[0] + '/Macros'
        if not os.path.exists(idir):
            idir = None
        # 0
        ifd.append({'widgetType':LoadButton,
                    'name':'openMacLib',
                    'wcfg':{'buttonType':Tkinter.Button,
                            'title':'Open Macro Library...',
                            'types':[('Macro Module Library', '*Mac.py'),
                                     ('Any Python Function', '*.py')],
                            'callback':self.loadMacLib_cb,
                            'idir':idir,
                            'widgetwcfg':{'text':'Open Macro Library'}},
                    'gridcfg':{'sticky':'we'}})
        # 1  (the original repeated the 'title' key twice with the same
        #     value; only one is kept)
        ifd.append({'name':'macros',
                    'widgetType':ListChooser,
                    'wcfg':{'title':'Choose a macro',
                            'command':self.setDefaultEntryName},
                    'gridcfg':{'sticky':Tkinter.E+Tkinter.W}} )
        # 2
        ifd.append({'widgetType':Tkinter.Entry,
                    'name':'menubar',
                    'defaultValue':'menuRoot',
                    'wcfg':{'label':'menu bar'},
                    'gridcfg':{'sticky':Tkinter.E}
                    })
        # 3
        ifd.append({'widgetType':Tkinter.Entry,
                    'name':'menubutton',
                    'defaultValue':'Macros',
                    'wcfg':{'label':'menu button'},
                    'gridcfg':{'sticky':Tkinter.E}
                    })
        # 4
        ifd.append({'widgetType':Tkinter.Entry,
                    'name':'menuentry',
                    'defaultValue':'',
                    'wcfg':{'label':'menu entry'},
                    'gridcfg':{'sticky':Tkinter.E}
                    })
        # 5
        ifd.append({'widgetType':Tkinter.Entry,
                    'name':'cascade',
                    'defaultValue':'',
                    'wcfg':{'label':'cascade'},
                    'gridcfg':{'sticky':Tkinter.E}
                    })
        # 6
        ifd.append({'name': 'loadMacro',
                    'widgetType':Tkinter.Button,
                    'wcfg':{'text':'Load Macro',
                            'bd':6,'command': self.loadMacro_cb},
                    'gridcfg':{'sticky':Tkinter.E+Tkinter.W},
                    })
        # 7
        ifd.append({'widgetType':Tkinter.Button,
                    'wcfg':{'text':'Dismiss',
                            'command': self.dismiss_cb}})
        return ifd

    def guiCallback(self, event=None):
        """Post the 'loadMacro' form; the Load button stays disabled until
        a macro is selected (see setDefaultEntryName)."""
        if self.active: return
        self.active = 1
        val = self.showForm("loadMacro", force=1, modal=0, blocking=0)
        ebn = self.cmdForms['loadMacro'].descr.entryByName
        ebn['loadMacro']['widget'].configure(state='disabled')

    def __call__(self, macroName, macroFile, menuBar='menuRoot',
                 menuButton='Macros', menuEntry=None, cascade=None, **kw):
        """None<---loadMacro(macroName, macroFile, menuBar='menuRoot',
                  menuButton='Macros', menuEntry=None, cascade=None, **kw)
        """
        self.doitWrapper(macroName, macroFile, menuBar=menuBar,
                         menuButton=menuButton, menuEntry=menuEntry,
                         cascade=cascade)

    def doit(self, macroName, macroFile, menuBar='menuRoot',
             menuButton='Macros', menuEntry=None, cascade=None):
        """Wrap the macro function *macroName* from *macroFile* in a
        Command and register it under the given menu location."""
        # Reuse the macros cached by loadMacLib_cb when possible.
        if not hasattr(self, 'macroFile') or macroFile != self.macroFile:
            names, macros, docs = self.getMacros(macroFile)
        else:
            names = self.macNames
            macros = self.macMacros
            docs = self.macDoc
        if len(names) == 0 or len(macros)==0 or len(docs)==0: return
        macIndex = names.index(macroName)
        macro = macros[macIndex]
        from VFCommand import Command, CommandGUI
        c = Command(func=macro)
        g = CommandGUI()
        if cascade:
            g.addMenuCommand(menuBar, menuButton, menuEntry,
                             cascadeName=cascade)
        else:
            g.addMenuCommand(menuBar, menuButton, menuEntry)
        self.vf.addCommand(c, macro.__name__, g)
## class loadMacroCommand(Command):
## """
## Command to load dynamically macro commands.
## Using the Gui the user can open a macro file. The macros available in
## that file are then displayed in a list chooser. When a macro is selected
## in the listchooser, its documentation string is deisplayed and a default
## name for the macro in the viewer is suggested. The user can also specify
## a menuBar, a menuButton as well as an optional cascade name.
## """
## active = 0
## def getMacros(self, file):
## """load all macro functions from file"""
## self.file = file
## _dir, file = os.path.split(file)
## if file[-3:]=='.py': file = file[:-3]
## import sys
## sys.path.insert(0, _dir)
## m = __import__(file, globals(), locals(), [])
## sys.path = sys.path[1:]
## m.__dict__['self'] = self.vf
## import types
## names = []
## macros = []
## doc = []
## for key,value in m.__dict__.items():
## if type(value)==types.FunctionType:
## names.append(key)
## macros.append(value)
## doc.append(value.__doc__)
## return names, macros, doc
## def loadMacLib_cb(self, filename):
## """Call back function for 'Open Macro library' button"""
## # self.ifd[0]['widget'] is the 'Open Macro Library' button
## self.ifd[0]['widget'].configure(relief='sunken')
## #file = os.path.split(filename)[1][:-3]
## names, macros, docs = self.getMacros(filename)
## self.macNames = names
## self.macMacros = macros
## self.macDoc = docs
## # Get a handle to the listchooser widget
## lc = self.ifd[1]['widget']
## lc.clear()
## if len(names) == len(docs):
## entries = map(lambda x, y: (x, y), names, docs)
## else:
## entries = map(lambda x: (x, None), names)
## map(lc.add, entries)
## self.ifd[0]['widget'].configure(relief='raised')
## # set cascade name to libary Name - "mac"
## w = self.ifd[5]['widget']
## w.delete(0, 'end')
## w.insert(0, os.path.split(filename)[1][:-3])
## def setDefaultEntryName(self, event=None):
## """Call back function for the listchooser showing macros.
## gets the name of the currently selected macro and puts it in the entry
## type in"""
## # enable add button
## self.ifd.entryByName['Load Macro']['widget'].configure(state='normal')
## # put default name into name entry
## val = self.ifd[1]['widget'].get()
## w = self.ifd[4]['widget']
## w.delete(0, 'end')
## w.insert(0, val[0])
## self.selectedMac = val[0]
## def addMacro(self, macro, menuBar, menuButton, name, cascade=None):
## from VFCommand import Command, CommandGUI
## c = Command(func=macro)
## g = CommandGUI()
## if cascade:
## g.addMenuCommand(menuBar, menuButton, name, cascadeName=cascade)
## else:
## g.addMenuCommand(menuBar, menuButton, name)
## self.vf.addCommand(c, macro.__name__, g)
## ## g.register(self.vf)
## self.log(file=self.file, macroName=macro.__name__, menuBar=menuBar,
## menuButton=menuButton, name=name, cascade=cascade)
## def loadMacro_cb(self, event=None):
## bar = self.ifd[2]['widget'].get()
## menub = self.ifd[3]['widget'].get()
## name = self.ifd[4]['widget'].get()
## cascade = self.ifd[5]['widget'].get()
## if cascade=='': cascade=None
## macIndex = self.macNames.index(self.selectedMac)
## macFunc = self.macMacros[macIndex]
## self.addMacro(macFunc, bar, menub, name, cascade)
## self.ifd[0]['widget'].configure(relief='raised')
## def customizeGUI(self):
## """create the cascade menu for selecting modules to be loaded"""
## self.selectedMac = ''
## # create the for descriptor
## ifd = self.ifd = InputFormDescr(title='Load macro commands')
## if len(self.vf.libraries) is None:
## modu = __import__('ViewerFramework')
## else:
## modu = __import__(self.vf.libraries[0])
## idir = os.path.split(modu.__file__)[0] + '/Macros'
## if not os.path.exists(idir):
## idir = None
## ifd.append( {'widgetType':'OpenButton', 'text':'Open Macro library ...',
## 'types':[('Macro Module Library', '*Mac.py'),
## ('Any Python Function', '*.py')],
## 'idir':idir,
## 'title':'Open Macro File',
## 'callback': self.loadMacLib_cb } )
## ifd.append({'title':'Choose a macro',
## 'widgetType':ListChooser,
## 'wcfg':{
## 'command':self.setDefaultEntryName,
## 'title':'Choose a macro'},
## 'gridcfg':{'sticky':Tkinter.E+Tkinter.W}} )
## ifd.append({'widgetType':Tkinter.Entry,
## 'defaultValue':'menuRoot',
## 'wcfg':{'label':'menu bar'},
## 'gridcfg':{'sticky':Tkinter.E}
## })
## ifd.append({'widgetType':Tkinter.Entry,
## 'defaultValue':'Macros',
## 'wcfg':{'label':'menu button'},
## 'gridcfg':{'sticky':Tkinter.E}
## })
## ifd.append({'widgetType':Tkinter.Entry,
## 'defaultValue':'',
## 'wcfg':{'label':'menu entry'},
## 'gridcfg':{'sticky':Tkinter.E}
## })
## ifd.append({'widgetType':Tkinter.Entry,
## 'defaultValue':'',
## 'wcfg':{'label':'cascade'},
## 'gridcfg':{'sticky':Tkinter.E}
## })
## ifd.append({'name': 'Load Macro',
## 'widgetType':Tkinter.Button,
## 'text':'Load Macro',
## 'wcfg':{'bd':6},
## 'gridcfg':{'sticky':Tkinter.E+Tkinter.W},
## 'command': self.loadMacro_cb})
## ifd.append({'widgetType':Tkinter.Button,
## 'text':'Dismiss',
## 'command': self.Dismiss_cb})
## def Dismiss_cb(self):
## #self.cmdForms['loadMacro'].withdraw()
## self.ifd.form.destroy()
## self.active = 0
## def guiCallback(self, event=None, file=None):
## if self.active: return
## self.active = 1
## self.customizeGUI()
## self.form = self.vf.getUserInput(self.ifd, modal=0, blocking=0)
## self.ifd.entryByName['Load Macro']['widget'].configure(state='disabled')
## if file: self.loadMacLib_cb(file)
## def __call__(self, file=None, macroName=None, menuBar='menuRoot',
## menuButton='Macros', name=None, cascade=None):
## """file=None, macroName=None, menuBar='menuRoot', menuButton='Macros',
## name=None, cascade=None"""
## if not macroName: self.guiCallback(file=file)
## else:
## if file[-3:]=='.py': file = file[:-3]
## names, macros, docs = self.getMacros(file)
## i = names.index(macroName)
## if name==None: name=macroName
## self.addMacro(macros[i], menuBar, menuButton, name, cascade)
class ShellCommand(Command):
    """Command to show/Hide the Python shell.
    \nPackage : ViewerFramework
    \nModule : basicCommand.py
    \nClass : ShellCommand
    \nCommand : Shell
    \nSynopsis:\n
        None<---Shell()
    """

    def onAddCmdToViewer(self):
        # Closing the shell window should hide it, not destroy it.
        if self.vf.hasGui:
            top = self.vf.GUI.pyshell.top
            top.protocol('WM_DELETE_WINDOW', self.vf.Shell.onDestroy)

    def show(self):
        """Map the Python shell window."""
        self.vf.GUI.pyshell.top.deiconify()

    def hide(self):
        """Unmap the Python shell window."""
        self.vf.GUI.pyshell.top.withdraw()

    def __call__(self, *args):
        """None<---Shell()
        """
        # Keep the toolbar checkbutton in sync with the requested state.
        variable = self.vf.GUI.toolbarCheckbuttons['Shell']['Variable']
        if args[0]:
            self.show()
            variable.set(1)
        else:
            self.hide()
            variable.set(0)

    def guiCallback(self):
        # The toolbar checkbutton state decides whether to show or hide.
        state = self.vf.GUI.toolbarCheckbuttons['Shell']['Variable'].get()
        if state:
            self.show()
        else:
            self.hide()

    def onDestroy(self):
        # Untoggle the toolbar button, then hide the window.
        self.vf.GUI.toolbarCheckbuttons['Shell']['Variable'].set(0)
        self.hide()
# Toolbar GUI for ShellCommand: a checkbutton that toggles the Python shell.
ShellCommandGUI = CommandGUI()
ShellCommandGUI.addToolBar('Shell', icon1='PyShell.gif',
                           balloonhelp='Python IDLE Shell', index=1)
class SaveSessionCommand(Command):
    """Command to allow the user to save the session as it is in a file.
    It logs all the transformation.
    \nPackage : Pmv
    \nModule : customizationCommands.py
    \nClass : SaveSessionCommand
    """

    def logString(self, *args, **kw):
        """return None as log string as we don't want to log this
        """
        pass

    def guiCallback(self, event=None):
        """Ask the user for a target file and save the session into it."""
        ### FIXME all the logs should be in a stack and not in a file.
        if self.vf.logMode == 'no':
            self.vf.warningMsg("No log information because logMode was set to no.")
            return
        newfile = self.vf.askFileSave(types = [
            ('Pmv sesion files', '*.psf'),
            ('all files', '*.py')],
                                      defaultextension=".psf",
                                      title = 'Save Session in File:')
        # PEP 8 idiom: 'is not None' instead of 'not ... is None'.
        if newfile is not None:
            self.doitWrapper(newfile, redraw=0)

    def doit(self, filename):
        """Save the session to *filename*.

        A '.psf' extension triggers a full-session save; for any other
        extension the current log file is copied to *filename* and the
        viewer/object state code is appended to it."""
        ext = os.path.splitext(filename)[1].lower()
        if ext=='.psf':
            self.vf.saveFullSession(filename)
        else:
            import shutil
            # get the current log: close it, copy it, then reopen for append.
            if hasattr(self.vf, 'logAllFile'):
                logFileName = self.vf.logAllFile.name
                self.vf.logAllFile.close()
                if filename!=logFileName:
                    shutil.copy(logFileName, filename)
                self.vf.logAllFile = open(logFileName,'a')
            # Add to it the transformation log.
            logFile = open(filename,'a')
            vi = self.vf.GUI.VIEWER
            code = vi.getViewerStateDefinitionCode('self.GUI.VIEWER')
            code.extend( vi.getObjectsStateDefinitionCode('self.GUI.VIEWER') )
            if code:
                for line in code:
                    logFile.write(line)
            # Persist the contour-curve tool state when it is in use.
            if vi.GUI.contourTk.get():
                controlpoints=vi.GUI.curvetool.getControlPoints()
                sensitivity=vi.GUI.d1scalewheel.get()
                logFile.write("self.GUI.VIEWER.GUI.curvetool.setControlPoints(%s)" %controlpoints)
                logFile.write("\n")
                logFile.write("self.GUI.VIEWER.GUI.curvetool.setSensitivity(%s)" %sensitivity)
            logFile.close()
        if hasattr(self.vf, 'recentFiles'):
            self.vf.recentFiles.add(filename, 'readSourceMolecule')
# SaveSessionCommand Command GUI:
# menu entry (File -> Save -> Current Session) plus a toolbar button.
SaveSessionCommandGUI = CommandGUI()
SaveSessionCommandGUI.addMenuCommand(
    'menuRoot', 'File', 'Current Session', index=2,
    cascadeName='Save', cascadeIndex=2, separatorAboveCascade=1)
SaveSessionCommandGUI.addToolBar('Save', icon1='filesave.gif',
                                 type='ToolBarButton',
                                 balloonhelp='Save Session', index=1)
class ExitCommand(Command):
"""Command to destroy application
\nPackage : ViewerFramework
\nModule : basicCommand.py
\nClass : ExitCommand
\nCommand : Exit
\nSynopsis:\n
None<---Exit(ask)
\nask = Flag when set to 1 a form asking you if you really want to quit
will popup, it will quit directly if set to 0
"""
def onAddCmdToViewer(self):
import warnings
if self.vf.hasGui:
self.vf.GUI.ROOT.protocol('WM_DELETE_WINDOW',self.askquit)
def logObjectTransformations(self, object):
warnings.warn( "logObjectTransformations is deprecated",
DeprecationWarning, stacklevel=2)
log = []
# FIXME won't work with instance matrices
mat = object.GetMatrix(object)
import numpy.oldnumeric as Numeric
log.append("self.transformObject('rotation', '%s', matrix=%s,log=0)"%(object.fullName,tuple(object.rotation)))
log.append("self.transformObject('translation', '%s', matrix=%s, log=0 )"%(object.fullName, tuple(object.translation)))
log.append("self.transformObject('scale', '%s', matrix=%s, log=0 )"%(object.fullName,tuple(object.scale)))
log.append("self.transformObject('pivot', '%s', matrix=%s, log=0 )"%(object.fullName,tuple(object.pivot)))
return log
def logObjectMaterial(self, object):
warnings.warn("logObjectMaterial is deprecated",
DeprecationWarning, stacklevel=2)
log = []
from opengltk.OpenGL import GL
log.append("from opengltk.OpenGL import GL")
mat = object.materials[GL.GL_FRONT]
log.append("self.setObject('%s', materials=%s, propName='ambi', matBind=%d)" % (object.fullName, repr(mat.prop[0])[6:-5],mat.binding[0]))
log.append("self.setObject('%s', materials=%s, propName='diff', matBind=%d)" % (object.fullName, repr(mat.prop[1])[6:-5],mat.binding[1]))
log.append("self.setObject('%s', materials=%s, propName='emis', matBind=%d)" % (object.fullName, repr(mat.prop[2])[6:-5],mat.binding[2]))
log.append("self.setObject('%s', materials=%s, propName='spec', matBind=%d)" % (object.fullName, repr(mat.prop[3])[6:-5],mat.binding[3]))
log.append("self.setObject('%s', materials=%s, propName='shini', matBind=%d)" % (object.fullName, repr(mat.prop[4])[6:-5],mat.binding[4]))
mat = object.materials[GL.GL_BACK]
log.append("self.setObject('%s', materials=%s, polyFace=GL.GL_BACK,propName='ambi', matBind=%d)" % (object.fullName, repr(mat.prop[0])[6:-5],mat.binding[0]))
log.append("self.setObject('%s', materials=%s, polyFace=GL.GL_BACK,propName='diff', matBind=%d)" % (object.fullName, repr(mat.prop[1])[6:-5],mat.binding[1]))
log.append("self.setObject('%s', materials=%s, polyFace=GL.GL_BACK,propName='spec', matBind=%d)" % (object.fullName, repr(mat.prop[2])[6:-5],mat.binding[2]))
log.append("self.setObject('%s', materials=%s, polyFace=GL.GL_BACK,propName='emis', matBind=%d)" % (object.fullName, repr(mat.prop[3])[6:-5],mat.binding[3]))
log.append("self.setObject('%s', materials=%s, polyFace=GL.GL_BACK,propName='shini', matBind=%d)" % (object.fullName, repr(mat.prop[4])[6:-5],mat.binding[4]))
return log
def logCameraTransformations(self, camera):
warnings.warn("logCameraTransformations is deprecated",
DeprecationWarning, stacklevel=2)
logStr = "self.setCamera('%s', \n"%camera.name
logStr = logStr + "rotation=(%9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f),\n"%tuple(camera.rotation)
logStr=logStr + "translation=(%9.3f, %9.3f, %9.3f),\n"%tuple(camera.translation)
logStr = logStr + "scale=(%9.3f, %9.3f, %9.3f),\n"%tuple(camera.scale)
logStr = logStr + "pivot=(%9.3f, %9.3f, %9.3f),\n"%tuple(camera.pivot)
logStr = logStr + "lookAt=(%9.3f, %9.3f, %9.3f),\n"%tuple(camera.lookAt)
logStr = logStr + "lookFrom=(%9.3f, %9.3f, %9.3f),\n"%tuple(camera.lookFrom)
logStr = logStr + "direction=(%9.3f, %9.3f, %9.3f))"%tuple(camera.direction)
return logStr+'\n'
def logCameraProp(self, camera):
warnings.warn("logCameraProp is deprecated",
DeprecationWarning, stacklevel=2)
logStr = "self.setCamera('%s', \n"%camera.name
logStr = logStr + "width=%d, height=%d, rootx=%d, rooty=%d,"%\
(camera.width, camera.height, camera.rootx, camera.rooty)
logStr = logStr + "fov=%f, near=%f, far=%f,"%\
(camera.fovy, camera.near, camera.far)
logStr = logStr + "color=(%6.3f,%6.3f,%6.3f,%6.3f))"%\
tuple(camera.backgroundColor)
return logStr+'\n'
def logLightTransformations(self, light):
warnings.warn("logLightTransformations is deprecated",
DeprecationWarning, stacklevel=2)
logStr = "self.setLight('%s', \n"%light.name
logStr = logStr + "rotation=(%9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f),\n"%tuple(light.rotation)
logStr = logStr + "translation=(%9.3f, %9.3f, %9.3f),\n"%tuple(light.translation)
logStr = logStr + "scale=(%9.3f, %9.3f, %9.3f),\n"%tuple(light.scale)
logStr = logStr + "pivot=(%9.3f, %9.3f, %9.3f),\n"%tuple(light.pivot)
logStr = logStr + "direction=(%9.3f,%9.3f,%9.3f,%9.3f))"%tuple(light.direction)
return logStr+'\n'
def logLightProp(self, light):
warnings.warn("logLightProp is deprecated",
DeprecationWarning, stacklevel=2)
logStr = "self.setLight('%s', \n"%light.name
logStr = logStr + "enable=%d, visible=%d, length=%f,\n"%\
(light.enabled, light.visible, light.length)
logStr = logStr + "ambient=(%6.3f,%6.3f,%6.3f,%6.3f),\n"%\
tuple(light.ambient)
logStr = logStr + "specular=(%6.3f,%6.3f,%6.3f,%6.3f),\n"%\
tuple(light.specular)
logStr = logStr + "diffuse=(%6.3f,%6.3f,%6.3f,%6.3f))\n"%\
tuple(light.diffuse)
return logStr+'\n'
def logClipTransformations(self, clip):
warnings.warn("logClipTransformations is deprecated",
DeprecationWarning, stacklevel=2)
logStr = "self.setClip('%s', \n"%clip.name
logStr = logStr + "rotation=(%9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f, %9.3f),\n"%tuple(clip.rotation)
logStr = logStr + "translation=(%9.3f, %9.3f, %9.3f),\n"%tuple(clip.translation)
logStr = logStr + "scale=(%9.3f, %9.3f, %9.3f),\n"%tuple(clip.scale)
logStr = logStr + "pivot=(%9.3f, %9.3f, %9.3f))\n"%tuple(clip.pivot)
return logStr+'\n'
def logClipProp(self, clip):
warnings.warn("logClipProp is deprecated",
DeprecationWarning, stacklevel=2)
logStr = "self.setClip('%s', \n"%clip.name
logStr = logStr + "visible=%d, lineWidth=%f,\n"%\
(clip.visible, clip.lineWidth)
logStr = logStr + "color=(%6.3f,%6.3f,%6.3f,%6.3f))\n"%\
tuple(clip.color)
return logStr+'\n'
def logAddClipPlanes(self, object):
warnings.warn("logAddClipPlanes is deprecated",
DeprecationWarning, stacklevel=2)
log = []
for c in object.clipP:
log.append("self.addClipPlane('%s','%s', %d, %d)\n"%\
(object.fullName, c.name, object.clipSide[c.num], 0))
for c in object.clipPI:
log.append("self.addClipPlane('%s','%s', %d, %d)\n"%\
(object.fullName, c.name, object.clipSide[c.num], 1))
return log
def logScene(self):
warnings.warn("logScene is deprecated",
DeprecationWarning, stacklevel=2)
log = []
vi = self.vf.GUI.VIEWER
for c in vi.cameras:
if c._modified:
log.append(self.logCameraTransformations(c))
log.append(self.logCameraProp(c))
for l in vi.lights:
if l._modified:
log.append(self.logLightTransformations(l))
log.append(self.logLightProp(l))
for c in vi.clipP:
if c._modified:
log.append(self.logClipTransformations(c))
log.append(self.logClipProp(c))
root = self.vf.GUI.VIEWER.rootObject
for o in root.AllObjects():
if not o.transformIsIdentity():
log.extend(self.logObjectTransformations(o))
if o._modified:
log.extend(self.logAddClipPlanes(o))
log.extend(self.logObjectMaterial(o))
# Trigger a Viewer Redraw at the end.
log.append("self.GUI.VIEWER.Redraw()")
return log
def savePerspective(self):
if self.vf.resourceFile:
rcFile = os.path.join(os.path.split(self.vf.resourceFile)[0], "perspective")
else:
rcFolder = getResourceFolderWithVersion()
rcFile = os.path.join(rcFolder, "ViewerFramework", "perspective")
try:
rcFile = open(rcFile, 'w')
except Exception, inst: #to avoid "IOError: [Errno 116] Stale NFS file handle" error message when running the tests
return
if self.vf.GUI.floatCamVariable.get():
rcFile.write("self.GUI.floatCamera()\n")
geom = self.vf.GUI.vwrCanvasFloating.geometry()
dum,x0,y0 = geom.split('+')
w,h = [int(x) for x in dum.split('x')]
rcFile.write("self.setCameraSize(%s, %s, xoffset=%s, yoffset=%s)\n"%(w,h,x0,y0))
xywh = self.vf.GUI.getGeom()
#make sure that xywh are within screen coordinates
if xywh[0]+xywh[2] > self.vf.GUI.ROOT.winfo_screenwidth() or\
xywh[1]+xywh[3] > self.vf.GUI.ROOT.winfo_screenheight():
rcFile.write("#"+str(xywh)+"\n")
else:
rcFile.write("self.GUI.setGeom"+str(xywh)+"\n")
# txt = self.vf.GUI.MESSAGE_BOX.tx.get().split("\n")
# if len(txt) > 5:
# txt = txt[5:]
# linesToWrite = []
# for line in txt:
# if line.startswith("self.browseCommands"):
# linesToWrite.append(line)
# linesToWrite = set(linesToWrite)
# for line in linesToWrite:
# line = line.replace('log=0','log=1')
# rcFile.write(line)
# rcFile.write("\n")
rcFile.close()
def __call__(self, ask, **kw):
"""None <- Exit(ask, **kw)
\nask = Flag when set to 1 a form asking you if you really want to quit
will popup, it will quit directly if set to 0.
"""
kw['redraw'] = 0
kw['log'] = 0
kw['busyIdle'] = 0
apply(self.doitWrapper, (ask,),kw)
    def doit(self, ask):
        """Run the exit sequence.

        Flushes pending transformation logs (and optionally saves the
        perspective/session depending on user preferences), then quits —
        with a confirmation dialog when `ask` is true.
        """
        logPref = self.vf.userpref['Transformation Logging']['value']
        if logPref == 'continuous':
            # Flush the last pending transformation log entry, if any.
            if hasattr(self.vf.GUI,'pendingLog') and self.vf.GUI.pendingLog:
                self.vf.log(self.vf.GUI.pendingLog[-1])
            if self.vf.userpref.has_key('Save Perspective on Exit'):
                logPerspective = self.vf.userpref['Save Perspective on Exit']['value']
                if logPerspective == 'yes':
                    self.savePerspective()
        elif logPref == 'final':
            #changed 10/24/2005-RH
            ##code = self.logScene()
            #vi = self.vf.GUI.VIEWER
            #code = vi.getViewerStateDefinitionCode('self.GUI.VIEWER')
            #code.extend( vi.getObjectsStateDefinitionCode('self.GUI.VIEWER') )
            #if code:
            #    for line in code:
            #        self.vf.log(line)
            if hasattr(self.vf, 'logAllFile'):
                self.vf.saveSession(self.vf.logAllFile.name)
        if ask:
##             from ViewerFramework.gui import InputFormDescr
#            from mglutil.gui.InputForm.Tk.gui import InputFormDescr
#            self.idf = InputFormDescr(title='Do you wish to Quit?')
#            self.idf.append({'widgetType':Tkinter.Button,
#                             'wcfg':{'text':'QUIT',
#                                     'width':10,
#                                     'command':self.quit_cb},
#                             'gridcfg':{'sticky':'we'}})
#
#            self.idf.append({'widgetType':Tkinter.Button,
#                             'wcfg':{'text':'CANCEL',
#                                     'width':10,
#                                     'command':self.cancel_cb},
#                             'gridcfg':{'sticky':'we', 'row':-1}})
#            val = self.vf.getUserInput(self.idf, modal=1, blocking=0,
#                                       okcancel=0)
            # Defer the confirmation dialog so the current Tk event finishes
            # processing before the modal dialog appears.
            self.vf.GUI.ROOT.after(10,self.askquit)
        else:
            self.quit_cb()
    def quit_cb(self):
        """Quit callback: delegate shutdown to the GUI's soft-quit handler."""
        #print "ExitComand.quit_cb"
        self.vf.GUI.softquit_cb()
def cancel_cb(self):
form = self.idf.form
form.root.destroy()
return
    def guiCallback(self):
        """GUI entry point: run the exit command with confirmation enabled."""
        #print "ExitComand.guiCallback"
        self.doitWrapper(1, redraw=0, log=0)
    def askquit(self):
        """Show an OK/Cancel quit dialog; on OK, clean up opened-session
        directories and schedule the actual GUI shutdown.
        """
        #print "ExitComand.askquit"
        import tkMessageBox
        ok = tkMessageBox.askokcancel("Quit?","Do you Wish to Quit?")
        if ok:
            if hasattr(self.vf, 'openedSessions'):
                # Remove temporary directories created for opened sessions.
                import shutil
                for folder in self.vf.openedSessions:
                    print 'removing session directory', folder
                    shutil.rmtree(folder)
            self.afterDoit = None
            # Defer the real quit so this callback can return first.
            self.vf.GUI.ROOT.after(10,self.vf.GUI.quit_cb)
class customAnimationCommand(Command):
    """Command to start Custom Animation notebook widget
    """

    def __init__(self, func=None):
        Command.__init__(self, func)
        self.root = None     # Toplevel window hosting the notebook
        self.animNB = None   # AnimationNotebook instance (created lazily)

    def guiCallback(self):
        self.startCustomAnim_cb()

    def __call__(self, **kw):
        """None <- customAnimation"""
        add = kw.get('add', None)
        if add is None:
            add = 1
        else: assert add in (0, 1)
        # dict.has_key() is deprecated (removed in Python 3); use 'in'.
        if 'customAnimation' in self.vf.GUI.toolbarCheckbuttons:
            self.vf.GUI.toolbarCheckbuttons['customAnimation']['Variable'].set(add)
            self.startCustomAnim_cb()

    def startCustomAnim_cb(self):
        """Show or hide the Custom Animation window according to the
        toolbar checkbutton state, building it lazily on first use.
        """
        on = self.vf.GUI.toolbarCheckbuttons['customAnimation']['Variable'].get()
        if on:
            if not self.animNB:
                # First activation: build the notebook in its own toplevel.
                from Pmv.scenarioInterface.animationGUI import AnimationNotebook
                self.root = Tkinter.Toplevel()
                self.root.title('Custom Animation')
                self.root.protocol("WM_DELETE_WINDOW", self.hide_cb)
                self.animNB = AnimationNotebook(self.vf, self.root)
            else:
                self.show_cb()
        else:
            self.hide_cb()

    def hide_cb(self):
        """Withdraw the window and untick the toolbar checkbutton."""
        if self.root:
            self.root.withdraw()
            self.vf.GUI.toolbarCheckbuttons['customAnimation']['Variable'].set(0)

    def show_cb(self, event=None):
        """Re-show the window and tick the toolbar checkbutton."""
        if self.root:
            self.root.deiconify()
            self.vf.GUI.toolbarCheckbuttons['customAnimation']['Variable'].set(1)

    def onAddCmdToViewer(self):
        """When added to a GUI-enabled viewer, embed the animation notebook
        in a dedicated 'AniMol' tools-notebook page with its own tab button.
        """
        if self.vf.hasGui:
            # add a page for scenario
            page = self.scenarioMaster = self.vf.GUI.toolsNoteBook.add("AniMol")
            from Pmv.scenarioInterface.animationGUI import AnimationNotebook
            self.animNB = AnimationNotebook(self.vf, page)
            button = Tkinter.Radiobutton(
                self.vf.GUI.toolsButtonBarMaster, width=10,
                var=self.vf.GUI.toolsButtonBarTabVar,
                value='Scenario', indicatoron=False,
                text='Scenario', font=('Helvetica', '', 10), padx=3, pady=0)
            button.pack(side='left', anchor='w')
            #button = self.vf.GUI.toolsNoteBook.tab(1)
            button.configure(command=self.adjustWidth)

    def adjustWidth(self):
        """Resize the workspace pane to match the notebook's width."""
        self.vf.GUI.toolsNoteBook.selectpage(1)
        self.vf.GUI.workspace.configurepane('ToolsNoteBook',size=self.animNB.master.winfo_width())
| 40.293867 | 203 | 0.520072 |
7d7f2e4efc55d4c2d3526d9ccc55cad1a50d40ef | 9,807 | py | Python | tests/generator/test_compression.py | mediaxina/chia-blockchain | 44ba53550dde479d2ad5a1c4fc2fa70e17dceed1 | [
"Apache-2.0"
] | 1 | 2021-06-15T16:06:06.000Z | 2021-06-15T16:06:06.000Z | tests/generator/test_compression.py | mediaxina/chia-blockchain | 44ba53550dde479d2ad5a1c4fc2fa70e17dceed1 | [
"Apache-2.0"
] | 19 | 2021-06-27T23:17:14.000Z | 2022-03-29T06:10:30.000Z | tests/generator/test_compression.py | mediaxina/chia-blockchain | 44ba53550dde479d2ad5a1c4fc2fa70e17dceed1 | [
"Apache-2.0"
] | null | null | null | # flake8: noqa: F501
from unittest import TestCase
from chia.full_node.bundle_tools import (
bundle_suitable_for_compression,
simple_solution_generator,
compressed_spend_bundle_solution,
match_standard_transaction_at_any_index,
)
from chia.full_node.generator import run_generator, create_generator_args
from chia.types.blockchain_format.program import Program, SerializedProgram, INFINITE_COST
from chia.types.generator_types import CompressorArg
from chia.types.spend_bundle import SpendBundle
from chia.util.byte_types import hexstr_to_bytes
from chia.util.ints import uint32
from chia.wallet.puzzles.load_clvm import load_clvm
from tests.core.make_block_generator import make_spend_bundle
from clvm_tools import binutils
# CLVM programs used by the (de)compression tests, loaded from the bundled
# chia.wallet.puzzles package.
TEST_GEN_DESERIALIZE = load_clvm("test_generator_deserialize.clvm", package_or_requirement="chia.wallet.puzzles")
DESERIALIZE_MOD = load_clvm("chialisp_deserialisation.clvm", package_or_requirement="chia.wallet.puzzles")
DECOMPRESS_PUZZLE = load_clvm("decompress_puzzle.clvm", package_or_requirement="chia.wallet.puzzles")
DECOMPRESS_CSE = load_clvm("decompress_coin_solution_entry.clvm", package_or_requirement="chia.wallet.puzzles")
DECOMPRESS_CSE_WITH_PREFIX = load_clvm(
    "decompress_coin_solution_entry_with_prefix.clvm", package_or_requirement="chia.wallet.puzzles"
)
DECOMPRESS_BLOCK = load_clvm("block_program_zero.clvm", package_or_requirement="chia.wallet.puzzles")

# CLVM nil: the serialized empty list.
Nil = Program.from_bytes(b"\x80")

# Serialized reference block generator containing one standard transaction;
# used below both as a plain generator and as the source of the compressed
# puzzle prefix.
original_generator = hexstr_to_bytes(
    "ff01ffffffa00000000000000000000000000000000000000000000000000000000000000000ff830186a080ffffff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3ff018080ffff80ffff01ffff33ffa06b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9ff830186a08080ff8080808080"
)  # noqa
class TestCompression(TestCase):
    """Checks that compressed and plain spend-bundle generators agree."""

    def test_spend_bundle_suitable(self):
        bundle: SpendBundle = make_spend_bundle(1)
        assert bundle_suitable_for_compression(bundle)

    def test_compress_spend_bundle(self):
        pass

    def test_compressed_block_results(self):
        bundle: SpendBundle = make_spend_bundle(1)
        # Locate the standard transaction inside the reference generator and
        # use it as the compression dictionary.
        tx_start, tx_end = match_standard_transaction_at_any_index(original_generator)
        compressor = CompressorArg(
            uint32(0), SerializedProgram.from_bytes(original_generator), tx_start, tx_end
        )
        compact = compressed_spend_bundle_solution(compressor, bundle)
        plain = simple_solution_generator(bundle)
        assert compact != plain
        _, compact_result = run_generator(compact, INFINITE_COST)
        _, plain_result = run_generator(plain, INFINITE_COST)
        print(compact_result)
        assert compact_result is not None
        assert plain_result is not None
        assert compact_result == plain_result
class TestDecompression(TestCase):
    """Exercises the CLVM decompression puzzles (deserialize, CSE and
    block-program-zero) against hand-assembled coin-solution entries.
    """

    def __init__(self, *args, **kwargs):
        super(TestDecompression, self).__init__(*args, **kwargs)
        self.maxDiff = None

    def test_deserialization(self):
        self.maxDiff = None
        # Deserializing the serialized form must round-trip to the program.
        cost, out = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [bytes(Program.to("hello"))])
        assert out == Program.to("hello")

    def test_deserialization_as_argument(self):
        self.maxDiff = None
        # Same round-trip, but with the deserializer passed in as an argument
        # to a wrapper generator program.
        cost, out = TEST_GEN_DESERIALIZE.run_with_cost(
            INFINITE_COST, [DESERIALIZE_MOD, Nil, bytes(Program.to("hello"))]
        )
        print(bytes(Program.to("hello")))
        print()
        print(out)
        assert out == Program.to("hello")

    def test_decompress_puzzle(self):
        # Smoke test only: prints the decompressed puzzle, no assertion.
        cost, out = DECOMPRESS_PUZZLE.run_with_cost(
            INFINITE_COST, [DESERIALIZE_MOD, b"\xff", bytes(Program.to("pubkey")), b"\x80"]
        )
        print()
        print(out)

    # An empty CSE is invalid. (An empty CSE list may be okay)
    # def test_decompress_empty_cse(self):
    #     cse0 = binutils.assemble("()")
    #     cost, out = DECOMPRESS_CSE.run_with_cost(INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, b"\xff", b"\x80", cse0])
    #     print()
    #     print(out)

    def test_decompress_cse(self):
        """ Decompress a single CSE / CoinSolutionEntry """
        cse0 = binutils.assemble(
            "((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
        )  # noqa
        cost, out = DECOMPRESS_CSE.run_with_cost(
            INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, b"\xff", b"\x80", cse0]
        )
        print()
        print(out)

    def test_decompress_cse_with_prefix(self):
        cse0 = binutils.assemble(
            "((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ())))"
        )  # noqa

        # start/end delimit the shared puzzle prefix inside original_generator
        # (presumably 2 framing bytes + 44 bytes of coin id/amount — confirm).
        start = 2 + 44
        end = start + 238
        prefix = original_generator[start:end]
        # (deserialize decompress_puzzle puzzle_prefix cse)
        cost, out = DECOMPRESS_CSE_WITH_PREFIX.run_with_cost(
            INFINITE_COST, [DESERIALIZE_MOD, DECOMPRESS_PUZZLE, prefix, cse0]
        )
        print()
        print(out)

    def test_block_program_zero(self):
        "Decompress a list of CSEs"
        self.maxDiff = None
        cse1 = binutils.assemble(
            "(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
        )  # noqa
        cse2 = binutils.assemble(
            """
(
  ((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
   (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3
    (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))
  )
  ((0x0000000000000000000000000000000000000000000000000000000000000001 0x0186a0)
   (0xb0a6207f5173ec41491d9f2c1b8fff5579e13703077e0eaca8fe587669dcccf51e9209a6b65576845ece5f7c2f3229e7e3
    (() (q (51 0x24254a3efc3ebfac9979bbe0d615e2eda043aa329905f65b63846fa24149e2b6 0x0186a0)) ())))
)
"""
        )  # noqa

        start = 2 + 44
        end = start + 238

        # (mod (decompress_puzzle decompress_coin_solution_entry start end compressed_cses deserialize generator_list reserved_arg)
        # cost, out = DECOMPRESS_BLOCK.run_with_cost(INFINITE_COST, [DECOMPRESS_PUZZLE, DECOMPRESS_CSE, start, Program.to(end), cse0, DESERIALIZE_MOD, bytes(original_generator)])
        cost, out = DECOMPRESS_BLOCK.run_with_cost(
            INFINITE_COST,
            [
                DECOMPRESS_PUZZLE,
                DECOMPRESS_CSE_WITH_PREFIX,
                start,
                Program.to(end),
                cse2,
                DESERIALIZE_MOD,
                bytes(original_generator),
            ],
        )
        print()
        print(out)

    def test_block_program_zero_with_curry(self):
        self.maxDiff = None
        cse1 = binutils.assemble(
            "(((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0) (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3 (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))))"
        )  # noqa
        cse2 = binutils.assemble(
            """
(
  ((0x0000000000000000000000000000000000000000000000000000000000000000 0x0186a0)
   (0xb081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3
    (() (q (51 0x6b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9 0x0186a0)) ()))
  )
  ((0x0000000000000000000000000000000000000000000000000000000000000001 0x0186a0)
   (0xb0a6207f5173ec41491d9f2c1b8fff5579e13703077e0eaca8fe587669dcccf51e9209a6b65576845ece5f7c2f3229e7e3
    (() (q (51 0x24254a3efc3ebfac9979bbe0d615e2eda043aa329905f65b63846fa24149e2b6 0x0186a0)) ())))
)
"""
        )  # noqa

        start = 2 + 44
        end = start + 238

        # (mod (decompress_puzzle decompress_coin_solution_entry start end compressed_cses deserialize generator_list reserved_arg)
        # cost, out = DECOMPRESS_BLOCK.run_with_cost(INFINITE_COST, [DECOMPRESS_PUZZLE, DECOMPRESS_CSE, start, Program.to(end), cse0, DESERIALIZE_MOD, bytes(original_generator)])
        # Curry in the fixed arguments, then run with only the variable ones.
        p = DECOMPRESS_BLOCK.curry(DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, start, Program.to(end))
        cost, out = p.run_with_cost(INFINITE_COST, [cse2, DESERIALIZE_MOD, bytes(original_generator)])
        print()
        print(p)
        print(out)

        # Curry everything except the generator list itself.
        p_with_cses = DECOMPRESS_BLOCK.curry(
            DECOMPRESS_PUZZLE,
            DECOMPRESS_CSE_WITH_PREFIX,
            start,
            Program.to(end),
            cse2,
            DESERIALIZE_MOD,
        )
        generator_args = create_generator_args([SerializedProgram.from_bytes(original_generator)])
        cost, out = p_with_cses.run_with_cost(INFINITE_COST, generator_args)
        print()
        print(p_with_cses)
        print(out)
| 45.193548 | 792 | 0.744162 |
0bc23cf87fa43662a21fd698b99ce731e5c1b4ed | 5,259 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_usages_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_usages_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_04_01/operations/_usages_operations.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations(object):
    """UsagesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2018_04_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        location,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["models.UsagesListResult"]
        """List network usages for a subscription.

        :param location: The location where resource usage is queried.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either UsagesListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_04_01.models.UsagesListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.UsagesListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-04-01"
        accept = "application/json"

        # Build the HTTP request for the first page (no next_link) or for a
        # continuation page (next_link provided by the service).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        # Deserialize one page into (next_link, iterator of elements).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('UsagesListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        # Fetch one page, mapping non-200 responses to ARM errors.
        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/usages'}  # type: ignore
| 44.567797 | 134 | 0.649173 |
a4360e297625ffc7cb602f5e2ee4dc8ae23db5ba | 2,816 | py | Python | examples/04_Resonator_Mask/write_mask.py | nanophysics/nanoGDS | 606ca36bafb4ff844b70cfe46520e62df35edc6f | [
"MIT"
] | 1 | 2020-09-15T15:27:13.000Z | 2020-09-15T15:27:13.000Z | examples/04_Resonator_Mask/write_mask.py | nanophysics/nanoGDS | 606ca36bafb4ff844b70cfe46520e62df35edc6f | [
"MIT"
] | null | null | null | examples/04_Resonator_Mask/write_mask.py | nanophysics/nanoGDS | 606ca36bafb4ff844b70cfe46520e62df35edc6f | [
"MIT"
] | null | null | null | import nanogds
import gdspy
import numpy as np
import helpers
PI = np.pi
def save_single_design(save_name, shape):
lib = nanogds.GDS()
lib.load_gds("Markerchip Variant B", "markerchip_variant_B.gds")
lib.add("Resonator", shape)
lib.save(f"designs/{save_name}")
if __name__ == "__main__":
RESONATOR_MEANDER = 150 # total resonator length 9053 um
MAPPING = {
"B": [3, 4, 5, 6],
"C": [7, 8, 9, 10],
"D": [11, 12, 13, 14],
"E": [15, 16, 17, 18],
}
mask = nanogds.MaskTemplate("mask_template")
### A1 --- resonator only
shape = helpers.get_resonator_shape(
resonator_meander=RESONATOR_MEANDER,
finger_length=0,
finger_gap=6,
extend_ground=True,
add_tap1=False,
add_center_tap=False,
add_tap2=False,
)
mask.add_reference("A1_SHAPE", shape.shapes, "A1")
save_single_design("A1", shape)
### A2 --- resonator with gate taps
shape = helpers.get_resonator_shape(
resonator_meander=RESONATOR_MEANDER,
finger_length=0,
finger_gap=6,
extend_ground=True,
add_tap1=True,
add_tap2=True,
add_center_tap=False,
)
mask.add_reference("A2_SHAPE", shape.shapes, "A2")
save_single_design("A2", shape)
### A3 --- resonator with gate and center taps
shape = helpers.get_resonator_shape(
resonator_meander=RESONATOR_MEANDER,
finger_length=0,
finger_gap=6,
extend_ground=True,
add_tap1=True,
add_tap2=True,
add_center_tap=True,
)
mask.add_reference("A3_SHAPE", shape.shapes, "A3")
save_single_design("A3", shape)
### A4 --- resonator only
shape = helpers.get_resonator_shape(
resonator_meander=RESONATOR_MEANDER,
finger_length=0,
finger_gap=6,
extend_ground=True,
add_tap1=True,
add_center_tap=True,
add_tap2=True,
add_ground_connection=True,
)
mask.add_reference("A4_SHAPE", shape.shapes, "A4")
save_single_design("A4", shape)
### COLUMNS B - E --- variation of different finger lengths (see MAPPING)
for column, lengths in MAPPING.items():
for split, length in zip([f"{column}{i+1}" for i in range(4)], lengths):
shape = helpers.get_resonator_shape(
resonator_meander=RESONATOR_MEANDER,
finger_length=length,
finger_gap=3,
extend_ground=True,
add_tap1=True,
add_tap2=True,
add_center_tap=True,
)
mask.add_reference(f"{split}_SHAPE", shape.shapes, split)
save_single_design(split, shape)
### write mask
mask.write("MASK")
| 28.444444 | 87 | 0.595881 |
4b24102db5d7776974ec35fa8b4242b93b37ef52 | 3,845 | py | Python | tests/lambda_handlers_tests/test_checkDownloadHandler.py | mondele/tx-manager | ddbbeeae5990a327ffc14b42c478d3ea435c0533 | [
"MIT"
] | 3 | 2017-03-17T02:25:21.000Z | 2017-05-18T22:18:20.000Z | tests/lambda_handlers_tests/test_checkDownloadHandler.py | mondele/tx-manager | ddbbeeae5990a327ffc14b42c478d3ea435c0533 | [
"MIT"
] | 184 | 2016-10-13T02:56:16.000Z | 2021-03-25T21:27:20.000Z | tests/lambda_handlers_tests/test_checkDownloadHandler.py | mondele/tx-manager | ddbbeeae5990a327ffc14b42c478d3ea435c0533 | [
"MIT"
] | 16 | 2016-09-15T23:34:19.000Z | 2019-07-25T07:06:32.000Z | from __future__ import absolute_import, unicode_literals, print_function
import json
import unittest
from libraries.door43_tools.download_metrics import DownloadMetrics
from libraries.lambda_handlers.check_download_handler import CheckDownloadHandler
from moto import mock_s3
from libraries.app.app import App
@mock_s3
class CheckDownloadsTest(unittest.TestCase):
    """Tests for CheckDownloadHandler using a moto-mocked S3 bucket."""

    def setUp(self):
        """Runs before each test."""
        App(prefix='{0}-'.format(self._testMethodName), db_connection_string='sqlite:///:memory:')
        App.pre_convert_s3_handler().create_bucket()

    def test_check_present_download(self):
        # given
        commit_id = '39a099622d'
        key = 'preconvert/' + commit_id + '.zip'
        App.pre_convert_s3_handler().put_contents(key, "dummy")
        self.callback = 'callback'
        event = {
            'data': {
                'commit_id': commit_id,
                'callback': self.callback
            }
        }
        self.expected_download_exists = True
        self.error_response = None
        handler = CheckDownloadHandler()

        # when
        results = handler.handle(event, None)

        # then
        self.validate_results(results)

    def test_check_missing_download(self):
        # given
        commit_id = '39a099622d'
        self.callback = 'callback'
        event = {
            'data': {
                'commit_id': commit_id,
                'callback': self.callback
            }
        }
        self.expected_download_exists = False
        self.error_response = None
        handler = CheckDownloadHandler()

        # when
        results = handler.handle(event, None)

        # then
        self.validate_results(results)

    def test_check_invalid_download(self):
        # given
        commit_id = ''
        self.callback = 'callback'
        event = {
            'data': {
                'commit_id': commit_id,
                'callback': self.callback
            }
        }
        self.expected_download_exists = False
        self.error_response = DownloadMetrics.ACCESS_FAILED_ERROR + commit_id
        handler = CheckDownloadHandler()

        # when
        results = handler.handle(event, None)

        # then
        self.validate_results(results)

    def test_check_access_error(self):
        # given
        commit_id = '39a099622d'
        self.callback = 'callback'
        event = {
            'vars': {
                'pre_convert_bucket': 'invalid-bucket'
            },
            'data': {
                'commit_id': commit_id,
                'callback': self.callback
            }
        }
        self.expected_download_exists = False
        self.error_response = DownloadMetrics.ACCESS_FAILED_ERROR + commit_id
        handler = CheckDownloadHandler()

        # when
        results = handler.handle(event, None)

        # then
        self.validate_results(results)

    #
    # helpers
    #
    def validate_results(self, response):
        """Assert the JSONP response matches the expectations the test set up
        (self.callback, self.error_response, self.expected_download_exists).
        """
        callback, data, valid_jsonp = self.parse_jsonp(response)
        self.assertEqual(self.callback, callback)
        self.assertTrue(valid_jsonp)

        if self.error_response:
            self.assertEqual(self.error_response, data['ErrorMessage'])
        else:
            self.assertEqual(self.expected_download_exists, data['download_exists'])

    def parse_jsonp(self, text):
        """Split a JSONP string 'callback({...})' into (callback, payload
        dict, validity flag). Returns (None, None, False) on malformed input.
        """
        valid = False
        callback = None
        data = None
        try:
            prefix = text.split('(')
            dummy_test = '__'
            payload = (prefix[1] + dummy_test).split(')')
            callback = prefix[0]
            data = json.loads(payload[0])
            valid = (payload[1] == dummy_test) and (len(data) > 0)
        except Exception:
            # Malformed JSONP: report invalid rather than raising.
            # (Was a bare 'except:', which also swallowed SystemExit
            # and KeyboardInterrupt.)
            pass
        return callback, data, valid
| 29.128788 | 98 | 0.584135 |
0853cf3f3bd4befb0a01c480739645e6dd67072e | 790 | py | Python | HackerRank Solutions/Algorithms/Strings/Game of Thrones - I/Game of Thrones - I.py | DevashishPathrabe/Competetive-Coding | 91049459359854b7834cbfb31415682600dc9c57 | [
"MIT"
] | null | null | null | HackerRank Solutions/Algorithms/Strings/Game of Thrones - I/Game of Thrones - I.py | DevashishPathrabe/Competetive-Coding | 91049459359854b7834cbfb31415682600dc9c57 | [
"MIT"
] | null | null | null | HackerRank Solutions/Algorithms/Strings/Game of Thrones - I/Game of Thrones - I.py | DevashishPathrabe/Competetive-Coding | 91049459359854b7834cbfb31415682600dc9c57 | [
"MIT"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
from collections import Counter
#
# Complete the 'gameOfThrones' function below.
#
# The function is expected to return a STRING.
# The function accepts STRING s as parameter.
#
def gameOfThrones(s):
    """Return 'YES' if the characters of *s* can be rearranged into a
    palindrome, 'NO' otherwise.

    A string is an anagram of a palindrome iff at most one character occurs
    an odd number of times (the potential middle character). This single
    invariant covers both parities of len(s) — for an even-length string the
    number of odd counts is necessarily even, so "<= 1" means exactly 0 —
    which removes the original's duplicated even/odd branching.
    """
    odd_counts = sum(1 for count in Counter(s).values() if count % 2)
    return 'YES' if odd_counts <= 1 else 'NO'
if __name__ == '__main__':
    # HackerRank harness: read the string from stdin and write the verdict
    # to the file named by the OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    s = input()
    result = gameOfThrones(s)
    fptr.write(result + '\n')
    fptr.close()
26e6dfaa0baa43f868ae61dffcda47140e2647fe | 39,041 | py | Python | HelpDesk-LabReport.py | flopezag/fiware-scrum-reports | 56773c2b1d0603f019f08ca7b66fc091e2b975a0 | [
"Apache-2.0"
] | null | null | null | HelpDesk-LabReport.py | flopezag/fiware-scrum-reports | 56773c2b1d0603f019f08ca7b66fc091e2b975a0 | [
"Apache-2.0"
] | 6 | 2018-09-04T08:49:29.000Z | 2018-09-05T10:31:32.000Z | HelpDesk-LabReport.py | flopezag/fiware-scrum-reports | 56773c2b1d0603f019f08ca7b66fc091e2b975a0 | [
"Apache-2.0"
] | null | null | null | import os
import operator
from collections import OrderedDict
from datetime import date, datetime
import xlsxwriter
from xlsxwriter.utility import xl_range
from kernel.Calendar import agileCalendar
from kernel.DataBoard import Data
from kernel.DataFactory import DataEngine
from kernel.NM_Aggregates import Deck, LabDeck, ChapterDeck
from kernel.NM_HelpDeskReporter import DeckReporter, TechChapterReporter, LabChannelReporter
from kernel.Reporter import CoordinationReporter
from kernel.Settings import settings
from kernel.SheetFormats import SpreadsheetFormats
from kernel.UploaderTool import Uploader
from kernel.NodesBook import helpdeskNodesBook
from functools import reduce
__author__ = "Fernando López"

# Report scope: this script covers the Lab channel only.
chapters = 'Lab'
def __init__(self, wb, ws):
self._wb = wb
self._ws = ws
self._column = 10
def draw_composition(self, data):
data = {item: data[item] for item in data if data[item]}
wb, ws = self._wb, self._ws
chart = wb.add_chart({'type': 'pie'})
headings = ('Composition', '# Items')
col = self._column
ws.write_row(0, col, headings)
ws.write_column(1, col+0, data)
ws.write_column(1, col+1, [data[k] for k in data])
sheet_name = ws.get_name()
chart.add_series({
'name': [sheet_name, 0, col],
'categories': [sheet_name, 1, col, len(data), col],
'values': [sheet_name, 1, col+1, len(data), col+1],
'data_labels': {'value': True, 'percentage': True}
})
chart.set_title({'name': 'Composition'})
chart.set_legend({'position': 'top'})
chart.set_size({'width': 250, 'height': 300, 'x_scale': 1, 'y_scale': 1})
chart.set_plotarea({'fill': {'color': '#FFFF99'}})
chart.set_style(2)
self._column += len(headings) + 1
return chart
def draw_status(self, data):
data = {item: data[item] for item in data if data[item]}
wb = self._wb
ws = self._ws
chart = wb.add_chart({'type': 'bar'})
headings = ('Status', '#Items')
col = self._column
status = ('Closed', 'Answered', 'Impeded', 'In Progress', 'Open')
ws.write_row(0, col, headings)
ws.write_column(1, col+0, status)
value = (lambda x, y: y[x] if x in y else 0)
ws.write_column(1, col+1, [value(k, data) for k in status])
sheet_name = ws.get_name()
chart.add_series({
'name': [sheet_name, 0, col],
'categories': [sheet_name, 1, col, len(status), col],
'values': [sheet_name, 1, col+1, len(status), col+1],
'data_labels': {'value': True}
})
chart.set_title({'name': 'Status'})
chart.set_legend({'none': True})
chart.set_x_axis({'name': '# items'})
chart.set_size({'width': 300, 'height': 300, 'x_scale': 1, 'y_scale': 1})
chart.set_plotarea({'fill': {'color': '#FFFF99'}})
chart.set_style(2)
self._column += len(headings) + 1
return chart
def draw_resolution(self, data):
data = {item: data[item] for item in data if data[item]}
wb = self._wb
ws = self._ws
chart = wb.add_chart({'type': 'bar'})
headings = ('Resolution', '#Items')
resolutions = ('Done',
'Fixed',
'Dismissed',
'Incomplete',
'Duplicate',
'Cannot Reproduce',
'New functionality',
"Won't Fix")
col = self._column
value = (lambda x, y: y[x] if x in y else 0)
ws.write_row(0, col, headings)
ws.write_column(1, col+0, resolutions)
ws.write_column(1, col+1, [value(k, data) for k in resolutions])
sheet_name = ws.get_name()
chart.add_series({
'name': [sheet_name, 0, col],
'categories': [sheet_name, 1, col, len(resolutions), col],
'values': [sheet_name, 1, col+1, len(resolutions), col+1],
'data_labels': {'value': True}
})
chart.set_title({'name': 'Resolution'})
chart.set_x_axis({'name': '# items'})
chart.set_legend({'none': True})
chart.set_size({'width': 450, 'height': 300, 'x_scale': 1, 'y_scale': 1})
chart.set_plotarea({'fill': {'color': '#FFFF99'}})
chart.set_style(2)
self._column += len(headings) + 1
return chart
    def draw_evolution(self, data):
        """Write monthly created/resolved/progress series and return a line
        chart with the progress series overlaid as columns.
        """
        wb = self._wb
        ws = self._ws
        chart = wb.add_chart({'type': 'line'})
        headings = ('Month', 'Created', 'Resolved', 'Progress')
        col = self._column
        ws.write_row(0, col, headings)
        # print(data['categories'])
        ws.write_column(1, col+0, data['categories'])
        ws.write_column(1, col+1, data['created']['data'])
        ws.write_column(1, col+2, data['resolved']['data'])
        ws.write_column(1, col+3, data['progress']['data'])
        # NOTE(review): this all-zero helper column is not referenced by any
        # series below — confirm whether it is still needed.
        ws.write_column(1, col+4, [0]*(len(data['categories'])))
        sheet_name = ws.get_name()
        chart.add_series({
            'name': [sheet_name, 0, col+1],
            'categories': [sheet_name, 1, col+0, len(data['categories']), col+0],
            'values': [sheet_name, 1, col+1, len(data['created']['data']), col+1],
            'line': {'color': '#008000'}  # created - green
        })
        chart.add_series({
            'name': [sheet_name, 0, col+2],
            'categories': [sheet_name, 1, col+0, len(data['categories']), col+0],
            'values': [sheet_name, 1, col+2, len(data['resolved']['data']), col+2],
            'line': {'color': '#0000FF'}  # resolve - cyan
        })
        # Progress is drawn as a column chart merged into the line chart.
        cchart = wb.add_chart({'type': 'column'})
        cchart.add_series({
            'name': [sheet_name, 0, col+3],
            'categories': [sheet_name, 1, col+0, len(data['categories']), col+0],
            'values': [sheet_name, 1, col+3, len(data['categories']), col+3],
            # 'values': [sheet_name, 1, col+5, len(data['progress']), col+5],
            'data_labels': {'value': True},
            'fill': {'color': '#FF00FF'}  # resolve - magenta
        })
        chart.combine(cchart)
        chart.set_title({'name': 'Helpdesk Evolution'})
        chart.set_x_axis({'name': '# Month'})
        chart.set_y_axis({'name': '# items'})
        chart.set_legend({'position': 'top'})
        chart.set_size({'width': 1000, 'height': 288, 'x_scale': 1, 'y_scale': 1})
        chart.set_plotarea({'fill': {'color': '#FFFF99'}})
        chart.set_style(2)
        # NOTE(review): five data columns are written but the advance is
        # len(headings) + 1 == 5, so no spacer column is left here (unlike
        # the other draw_* methods) — confirm this is intentional.
        self._column += len(headings) + 1
        return chart
def draw_resolution_time(self, data):
    # Column chart over '#days' categories with three series: 'recent',
    # 'time' (mature) and 'age' (pending) -- each a dict with a 'data'
    # list aligned to data['categories']. Returns the configured chart
    # and advances self._column past the staged data.
    # NOTE(review): indentation reconstructed from a whitespace-mangled
    # source -- verify against version control.
    wb = self._wb
    ws = self._ws
    chart = wb.add_chart({'type': 'column'})
    headings = ('#days', 'Recent', 'Mature', 'Pending')
    col = self._column  # first free column of the data staging area
    ws.write_row(0, col, headings)
    ws.write_column(1, col+0, data['categories'])
    ws.write_column(1, col+1, data['recent']['data'])
    ws.write_column(1, col+2, data['time']['data'])
    ws.write_column(1, col+3, data['age']['data'])
    sheet_name = ws.get_name()
    chart.add_series({
        'name': [sheet_name, 0, col+1],
        'categories': [sheet_name, 1, col+0, len(data['categories']), col+0],
        'values': [sheet_name, 1, col+1, len(data['recent']['data']), col+1],
        'fill': {'color': '#008000'}  # recent - green
    })
    chart.add_series({
        'name': [sheet_name, 0, col+2],
        'categories': [sheet_name, 1, col+0, len(data['categories']), col+0],
        'values': [sheet_name, 1, col+2, len(data['time']['data']), col+2],
        'fill': {'color': '#0000FF'}  # mature - blue
    })
    chart.add_series({
        'name': [sheet_name, 0, col+3],
        'categories': [sheet_name, 1, col+0, len(data['categories']), col+0],
        'values': [sheet_name, 1, col+3, len(data['age']['data']), col+3],
        'fill': {'color': '#FF0000'}  # pending - red
    })
    chart.set_title({'name': 'Helpdesk Resolution Time'})
    chart.set_x_axis({'name': '# days'})
    chart.set_y_axis({'name': '# issues'})
    chart.set_legend({'position': 'top'})
    chart.set_size({'width': 1000, 'height': 288, 'x_scale': 1, 'y_scale': 1})
    chart.set_plotarea({'fill': {'color': '#CCFFCC'}})
    chart.set_style(2)
    # Reserve the staged columns plus one spacer for the next chart.
    self._column += len(headings) + 1
    return chart
def draw_enablers_status(self, data):
    # Stacked horizontal-bar chart: one bar per enabler, one segment per
    # status; enablers are ordered by total issue count, descending.
    # Each value of `data` is a sequence whose first element is the
    # status -> count mapping for that enabler.
    # NOTE(review): indentation reconstructed from a whitespace-mangled
    # source -- verify against version control.
    wb, ws = self._wb, self._ws
    data = {k: data[k][0] for k in data}
    # Rank enablers by total count (descending), then rebuild an ordered
    # mapping that keeps the full per-status counts.
    _data = sorted({k: sum(data[k].values()) for k in data}.items(), key=operator.itemgetter(1), reverse=True)
    _data = OrderedDict([(item[0], data[item[0]]) for item in _data])
    chart = wb.add_chart({'type': 'bar', 'subtype': 'stacked'})
    status = ('Open', 'Answered', 'In Progress', 'Impeded', 'Closed')
    enablers = list(_data.keys())
    headings = ('Enabler',) + status
    col = self._column  # first free column of the data staging area
    ws.write_row(0, col, headings)
    ws.write_column(1, col+0, enablers)
    # One staged data column per status value.
    for i, _status in enumerate(status, start=1):
        ws.write_column(1, col+i, [_data[enabler][_status] for enabler in _data])
    sheet_name = ws.get_name()
    # One stacked series per status value.
    for i, _status in enumerate(status, start=1):
        chart.add_series({
            'name': [sheet_name, 0, col+i],
            'categories': [sheet_name, 1, col+0, len(enablers), col+0],
            'values': [sheet_name, 1, col+i, len(enablers), col+i],
            'data_labels': {'value': True}
        })
    chart.set_title({'name': "Enablers' Help Desk Status"})
    chart.set_y_axis({'name': 'Enablers'})
    chart.set_x_axis({'name': '# items'})
    chart.set_legend({'position': 'top'})
    chart.set_size({'width': 500, 'height': 1600, 'x_scale': 1, 'y_scale': 1})
    chart.set_plotarea({'fill': {'color': '#FFFF99'}})
    chart.set_style(2)
    # Reserve the staged columns plus one spacer for the next chart.
    self._column += len(headings) + 1
    return chart
def draw_nodes_contribution(self, data):
    # Bar chart with the number of help-desk items per node
    # (`data` maps node name -> sized collection of issues).
    # NOTE(review): indentation reconstructed from a whitespace-mangled
    # source -- verify against version control.
    wb, ws = self._wb, self._ws
    nodes = data.keys()
    chart = wb.add_chart({'type': 'bar'})
    headings = ('Nodes', 'Size')
    col = self._column  # first free column of the data staging area
    ws.write_row(0, col, headings)
    ws.write_column(1, col+0, nodes)
    # ws.write_column(1, col+1, [sum(data[node][0].values()) for node in nodes])
    ws.write_column(1, col+1, [len(data[node]) for node in nodes])
    sheet_name = ws.get_name()
    chart.add_series({
        'name': [sheet_name, 0, col+1],
        'categories': [sheet_name, 1, col+0, len(nodes), col+0],
        'values': [sheet_name, 1, col+1, len(nodes), col+1],
        'data_labels': {'value': True}
    })
    chart.set_title({'name': "Nodes' Help Desk Contribution"})
    chart.set_y_axis({'name': 'Enablers'})
    chart.set_x_axis({'name': '# items'})
    chart.set_legend({'position': 'top'})
    # Scale chart height with the number of nodes so labels stay readable.
    height = 100 + 35 * len(nodes)
    chart.set_size({'width': 500, 'height': height, 'x_scale': 1, 'y_scale': 1})
    chart.set_plotarea({'fill': {'color': '#FFFF99'}})
    chart.set_style(2)
    # Reserve the staged columns plus one spacer for the next chart.
    self._column += len(headings) + 1
    return chart
@staticmethod
def calculate_delta_time(a_issue_field):
    """Return the resolution time of an issue in days (float): the span
    between its 'created' and 'resolutiondate' ISO-8601 timestamps."""
    timestamp_format = '%Y-%m-%dT%H:%M:%S.%f%z'
    resolved_at = datetime.strptime(a_issue_field['resolutiondate'], timestamp_format)
    created_at = datetime.strptime(a_issue_field['created'], timestamp_format)
    # Seconds-per-day division yields a fractional number of days.
    return (resolved_at - created_at).total_seconds() / 86400
def draw_nodes_service_time(self, data):
    """
    Draw a bar chart with the mean issue-resolution time (in days) per node.

    :param data: mapping of node name -> deck object whose ``.data`` is a
        list of raw issues, each carrying a 'fields' dict with the
        'created'/'resolutiondate' timestamps used by calculate_delta_time.
    :return: the configured xlsxwriter chart; the chart data is staged in
        the worksheet starting at ``self._column``, which is advanced.
    """
    wb, ws = self._wb, self._ws
    # Freeze the ordering so the label column and the value column align.
    nodes = list(data.keys())
    chart = wb.add_chart({'type': 'bar'})
    headings = ('Nodes', 'Overall Mean')
    col = self._column
    ws.write_row(0, col, headings)
    ws.write_column(1, col+0, nodes)
    means = {}
    for node in nodes:
        # Per-issue resolution times in days for this node.
        days = [Painter.calculate_delta_time(issue['fields']) for issue in data[node].data]
        # Guard against nodes with no issues to avoid ZeroDivisionError.
        means[node] = sum(days) / len(days) if days else 0
    ws.write_column(1, col+1, [means[node] for node in nodes])
    sheet_name = ws.get_name()
    chart.add_series({
        'name': [sheet_name, 0, col+1],
        'categories': [sheet_name, 1, col+0, len(nodes), col+0],
        'values': [sheet_name, 1, col+1, len(nodes), col+1],
        'data_labels': {'value': True}
    })
    chart.set_title({'name': "Nodes' Help Desk Service Time"})
    chart.set_y_axis({'name': 'Enablers'})
    chart.set_x_axis({'name': '# days'})
    chart.set_legend({'position': 'top'})
    # Scale chart height with the number of nodes so labels stay readable.
    height = 100 + 35 * len(nodes)
    chart.set_size({'width': 500, 'height': height, 'x_scale': 1, 'y_scale': 1})
    chart.set_plotarea({'fill': {'color': '#CCFFCC'}})
    chart.set_style(2)
    # Reserve the staged columns plus one spacer for the next chart.
    self._column += len(headings) + 1
    return chart
class HelpDeskLabReporter:
    """Builds the Help-Desk Lab xlsx report: a Lab-channel overview sheet
    plus one worksheet per Lab node (see ``lab()``)."""

    def __init__(self):
        self.calendar = agileCalendar
        self.workbook = None   # xlsxwriter.Workbook, created in lab()
        self.spFormats = None  # SpreadsheetFormats, created in lab()
        self.data, self.timestamp, self.source = Data.getHelpDeskLabChannel()
        self.deck = Deck(self.data, self.timestamp, self.source)
        # Fixed analysis window for the report.
        self.start = date(2016, 12, 1)  # year, month, day
        self.end = date(2017, 11, 30)  # year, month, day
        self.reporter = LabChannelReporter(self.deck, start=self.start, end=self.end)
        self.reporter.deck = self.deck
def _coordination_helpdesk(self, coordination):
    # Render one worksheet with the backlog report for a coordination
    # project: banner, summary lines, charts and the full entry table.
    # NOTE(review): `self.factory` is not assigned in the visible
    # __init__ -- confirm where it is set before relying on this method.
    # NOTE(review): indentation reconstructed from a whitespace-mangled
    # source -- verify nesting against version control.
    wb = self.workbook
    ws = wb.add_worksheet(coordination.name[1:])  # drop the name's first character
    backlog = self.factory.getCoordinationBacklog(coordination.key)
    backlog.sort(key=backlog.sortDict['name'])
    painter = Painter(wb, ws)
    ws.set_zoom(80)
    ws.set_column(0, 0, 30)
    ws.set_column(1, 1, 122)
    ws.set_column(2, 5, 20)
    row, col = 0, 0
    # Banner row with logo.
    _heading = self.workbook.add_format({'bold': True, 'font_size': 30,
                                         'bg_color': '#002D67', 'font_color': '#FFE616', 'align': 'center'})
    ws.merge_range(xl_range(row, 0, row, 3), "Coordination Backlog", _heading)
    ws.set_row(0, 42)
    ws.insert_image(0, 0, settings.logofiware, {'x_scale': 0.5, 'y_scale': 0.5, 'x_offset': 0, 'y_offset': 0})
    row += 1
    ws.write(row, 0, 'Project Time:', self.spFormats.bold_right)
    ws.write(row, 1, '{}'.format(agileCalendar.projectTime()))
    ws.write(row, 2, 'Report Date:', self.spFormats.bold_right)
    ws.write(row, 3, date.today().strftime('%d-%m-%Y'))
    row += 1
    ws.write(row, 0, 'Start of Data Analysis:', self.spFormats.bold_right)
    ws.write(row, 1, '{}'.format(agileCalendar.projectTime(current_date=self.start)))
    row += 1
    ws.write(row, 0, 'End of Data Analysis:', self.spFormats.bold_right)
    ws.write(row, 1, '{}'.format(agileCalendar.projectTime(current_date=self.end)))
    row += 2
    _format = self.workbook.add_format({'bold': True, 'font_size': 15, 'bg_color': '#60C1CF'})
    ws.write(row, 0, 'Backlog Owner:', self.spFormats.bold_right)
    ws.write(row, 1, coordination.leader, _format)
    ws.write(row, 2, '', _format)
    row += 2
    # Summary block: counts by type, status, sprint status, tests, errors.
    ws.write(row, 0, 'Backlog Summary:', self.spFormats.bold_right)
    ws.write(row, 1, '# Items', self.spFormats.bold_left)
    row += 1
    reporter = CoordinationReporter(coordination.project, backlog)
    data = reporter.issueType
    ws.write(row, 0, 'Composition', self.spFormats.bold_right)
    ws.write(row, 1, '{0} Issues = {Epic} Epics + {Feature} Features + '
                     '{Story} User Stories + {WorkItem} WorkItems + {Bug} Bugs'.format(sum(data.values()), **data))
    row += 1
    data = reporter.perspective
    ws.write(row, 0, 'Status', self.spFormats.bold_right)
    ws.write(row, 1, '{0} Issues = {Implemented} Implemented + {Working On} Working On + '
                     ' {Foreseen} Foreseen'.format(sum(data.values()), **data))
    row += 1
    data = reporter.sprint_status
    ws.write(row, 0, 'Sprint Status', self.spFormats.red_bold_right)
    ws.write_string(row, 1, '{} Issues = {}'.format(sum(data.values()),
                    ' + '.join("{!s} {}".format(v, k) for (k, v) in data.items())))
    row += 1
    ws.write(row, 0, 'Tests', self.spFormats.bold_right)
    data = reporter.backlog.testMetrics
    total = sum(data['OK'].values()) + sum(data['KO'].values())
    ws.write_rich_string(row, 1,
                         '{0:,} Tests = {1:,}'.format(total, sum(data['OK'].values())),
                         self.spFormats.green, ' OK', ' + ',
                         '{0:,}'.format(sum(data['KO'].values())), self.spFormats.red, ' KO ')
    row += 1
    data = reporter.errors
    ws.write(row, 0, 'Errors', self.spFormats.bold_right)
    ws.write_rich_string(row, 1,
                         '{:,} Issues = {OK:,}'.format(sum(data.values()), **data),
                         self.spFormats.green,
                         ' OK',
                         ' + '
                         ' {KO:,}'.format(sum(data.values()), **data), self.spFormats.red, ' KO')
    row += 2
    # Chart area: composition / status / errors side by side, then
    # burndown + sprint status, then the evolution chart.
    chart = painter.draw_composition(reporter.issueType)
    ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
    chart = painter.draw_status(reporter.perspective)
    ws.insert_chart(row, 1, chart, {'x_offset': 300, 'y_offset': 0})
    chart = painter.draw_errors(reporter.errors)
    ws.insert_chart(row, 1, chart, {'x_offset': 712, 'y_offset': 0})
    row += 15
    chart = painter.draw_sprint_burndown(reporter.burndown)
    ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
    chart = painter.draw_sprint_status(reporter.sprint_status)
    ws.insert_chart(row, 1, chart, {'x_offset': 712, 'y_offset': 0})
    row += 15
    chart = painter.draw_evolution(reporter.implemented)
    ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
    row += 15
    # Full backlog entry table.
    _format = self.workbook.add_format({'bold': True, 'font_size': 20, 'bg_color': '#60C1CF', 'align': 'center'})
    ws.merge_range(xl_range(row, 0, row, 4), 'Backlog Entries', _format)
    row += 1
    ws.write_row(row, 0,
                 ('Item Id', 'Item reference', 'Time frame', 'Status', 'Item type'),
                 self.spFormats.column_heading)
    for issue in backlog:
        row += 1
        self._write_issue(ws, row, issue)
@staticmethod
def _write_stats(ws, row, data):
    # Write a one-line textual summary of a stats dict into column 1 of
    # the given row. With n == 0 only the count is shown; with n == 1 the
    # dispersion figures (std dev, variance) are omitted, since they are
    # not meaningful for a single sample.
    if data['n'] == 0:
        ws.write(row, 1, 'n={n}'.format(**data))
    elif data['n'] > 1:
        ws.write(row, 1,
                 'n={n}; min={min} days; max={max} days; mean={mean:.0f} days; median={median:.0f} days; '
                 'std dev={stdev:.0f} days; variance={variance:.0f} days'.format(**data))
    else:
        ws.write(row, 1,
                 'n={n}; min={min} days; max={max} days; mean={mean:.0f} days; median={median:.0f} days;'
                 .format(**data))
def _node_helpdesk(self, node):
    # Render one worksheet with the help-desk report for a single Lab
    # node: banner, summary, charts, statistics and issue tables.
    # NOTE(review): indentation reconstructed from a whitespace-mangled
    # source -- nesting of the chart/table sections under the `if`
    # guards is inferred; verify against version control.
    print('--------->', node.name)
    wb = self.workbook
    ws = wb.add_worksheet(node.name)
    deck = LabDeck(node, self.data, self.timestamp, self.source)
    # reporter = self.reporter
    painter = Painter(wb, ws)
    ws.set_zoom(80)
    ws.set_column(0, 0, 20)
    ws.set_column(1, 1, 20)
    ws.set_column(2, 2, 122)
    ws.set_column(3, 5, 25)
    row, col = 0, 0
    # Banner row with logo.
    _heading = self.workbook.add_format({'bold': True, 'font_size': 30,
                                         'bg_color': '#002D67', 'font_color': '#FFE616', 'align': 'center'})
    ws.merge_range(xl_range(row, 0, row, 4),
                   "Help Desk for Node: '{0}'".format(node.name), _heading)
    ws.set_row(0, 42)
    ws.insert_image(0, 0, settings.logofiware, {'x_scale': 0.5, 'y_scale': 0.5, 'x_offset': 0, 'y_offset': 0})
    row += 1
    ws.write(row, 0, 'Project Time:', self.spFormats.bold_right)
    ws.write(row, 1, '{}'.format(agileCalendar.projectTime()))
    ws.write(row, 3, 'Report Date:', self.spFormats.bold_right)
    ws.write(row, 4, date.today().strftime('%d-%m-%Y'))
    row += 1
    ws.write(row, 0, 'Start of Data Analysis:', self.spFormats.bold_right)
    ws.write(row, 1, '{}'.format(agileCalendar.projectTime(current_date=self.start)))
    row += 1
    ws.write(row, 0, 'End of Data Analysis:', self.spFormats.bold_right)
    ws.write(row, 1, '{}'.format(agileCalendar.projectTime(current_date=self.end)))
    row += 2
    _format = self.workbook.add_format({'bold': True, 'font_size': 15, 'color': 'green'})
    ws.write(row, 0, 'Node:', self.spFormats.bold_right)
    ws.write(row, 1, node.name, _format)
    row += 1
    ws.write(row, 0, 'Work Mode:', self.spFormats.bold_right)
    try:
        ws.write(row, 1, deck.node.mode)
    except Exception:
        # there is no data about the node, therefore we consider the node Inactive
        ws.write(row, 1, 'Inactive')
    row += 2
    # Summary block: composition and status counts.
    ws.write(row, 0, 'HelpDesk Summary:', self.spFormats.bold_right)
    ws.write(row, 1, '# Items', self.spFormats.bold_left)
    row += 1
    reporter = DeckReporter(node.name, deck, start=self.start, end=self.end)
    reporter.deck = deck
    ws.write(row, 0, 'Composition', self.spFormats.bold_right)
    ws.write(row, 1, '{} Issues = {} extRequests + {} Monitors'
             .format(len(deck), deck.issueType['extRequest'], deck.issueType['Monitor']))
    row += 1
    ws.write(row, 0, 'Status', self.spFormats.bold_right)
    ws.write(row, 1, '{} Issues = {} Open + {} In Progress + {} Impeded + {} Answered + {} Closed'
             .format(len(deck),
                     deck.status['Open'],
                     deck.status['In Progress'],
                     deck.status['Impeded'],
                     deck.status['Answered'],
                     deck.status['Closed']
                     )
             )
    # Charts only when the deck holds at least one issue.
    if len(reporter.deck):
        row += 2
        chart = painter.draw_composition(reporter.deck.issueType)
        ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
        chart = painter.draw_status(reporter.deck.status)
        ws.insert_chart(row, 1, chart, {'x_offset': 250, 'y_offset': 0})
        chart = painter.draw_resolution(reporter.deck.resolution)
        ws.insert_chart(row, 1, chart, {'x_offset': 550, 'y_offset': 0})
        row += 15
    row += 2
    # Statistics block.
    ws.write(row, 0, 'HelpDesk Set:', self.spFormats.bold_right)
    ws.write(row, 1, 'Statistics', self.spFormats.bold_left)
    row += 1
    ws.write(row, 0, 'All:', self.spFormats.bold_right)
    self._write_stats(ws, row, reporter.stats)
    if reporter.stats['n'] > 0:
        row += 1
        ws.write(row, 0, 'Pending Issues:', self.spFormats.bold_right)
        self._write_stats(ws, row, reporter.statsOfPending)
    if len(reporter.deck):
        row += 1
        chart = painter.draw_resolution_time(reporter.resolutionTime_graph_data)
        ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
        row += 15
        chart = painter.draw_evolution(reporter.evolution_graph_data)
        ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
        row += 15
    row += 1
    _format = self.workbook.add_format({'bold': True, 'font_size': 16, 'bg_color': '#009999'})
    row += 1
    # Issue tables (unresolved, then resolved).
    _format = self.workbook.add_format({'bold': True, 'font_size': 20, 'bg_color': '#3399FF', 'align': 'center'})
    ws.merge_range(xl_range(row, 0, row, 4), 'Help Desk Entries', _format)
    # NOTE(review): this "no entries" line is written unconditionally and
    # may be overwritten by the tables below -- confirm intent.
    ws.write(row+1, 1, 'No entries found for this enabler in the Tech channel of the Help Desk')
    _center = self.workbook.add_format({'align': 'center'})
    if len(reporter.deck.unresolved):
        row += 1
        _format = self.workbook.add_format({'bold': True,
                                            'font_size': 20,
                                            'bg_color': '#CCFFE5',
                                            'align': 'center'})
        ws.merge_range(xl_range(row, 0, row, 4), 'Unresolved Issues', _format)
        row += 1
        ws.write_row(row, 0,
                     ('Item Id', 'Channel', 'Summary', 'Status', 'Age (#days)'), self.spFormats.column_heading)
        for issue in reporter.deck.unresolved:
            row += 1
            ws.write_url(row, 0, issue.url, self.spFormats.link, issue.key)
            ws.write(row, 1, issue.channel.name)
            ws.write(row, 2, issue.name)
            ws.write(row, 3, issue.status)
            ws.write(row, 4, issue.age, _center)
    else:
        ws.write(row+1, 0, '>>>>> {} issues found'.format(len(reporter.deck.unresolved)))
    row += 1
    if len(reporter.deck.resolved):
        row += 1
        _format = self.workbook.add_format({'bold': True,
                                            'font_size': 20,
                                            'bg_color': '#CCFFE5',
                                            'align': 'center'})
        ws.merge_range(xl_range(row, 0, row, 4), 'Resolved Issues', _format)
        row += 1
        ws.write_row(row, 0,
                     ('Resolution Date', 'Item Id', 'Summary', 'Status-Resolution', 'Age (#days)'),
                     self.spFormats.column_heading)
        for issue in reporter.deck.resolved:
            row += 1
            ws.write(row, 0, issue.resolutionDate, self.spFormats.date)
            ws.write_url(row, 1, issue.url, self.spFormats.link, issue.key)
            ws.write(row, 2, issue.name)
            ws.write(row, 3, '{0} - {1}'.format(issue.status, issue.resolution))
            ws.write(row, 4, issue.age, _center)
    else:
        ws.write(row+1, 0, '>>>>> {} issues found'.format(len(reporter.deck.resolved)))
def _chapter_helpdesk(self, chapter):
    # Render one worksheet with the help-desk report for a technical
    # chapter: banner, summary, charts, statistics and per-enabler charts.
    # NOTE(review): indentation reconstructed from a whitespace-mangled
    # source -- verify nesting against version control.
    # NOTE(review): `chapter.Name` (capital N) vs `chapter.name` below --
    # confirm both attributes exist on the chapter object.
    print('------>', chapter.name)
    wb = self.workbook
    ws = wb.add_worksheet('{} Chapter'.format(chapter.name))
    deck = ChapterDeck(chapter, *Data.getChapterHelpDesk(chapter.name))
    reporter = TechChapterReporter(chapter, deck, start=self.start, end=self.end)
    painter = Painter(wb, ws)
    ws.set_zoom(80)
    ws.set_column(0, 0, 30)
    ws.set_column(1, 1, 122)
    ws.set_column(2, 5, 20)
    row, col = 0, 0
    # Banner row with logo.
    _heading = self.workbook.add_format({'bold': True, 'font_size': 30,
                                         'bg_color': '#002D67', 'font_color': '#FFE616', 'align': 'center'})
    ws.merge_range(xl_range(row, 0, row, 3),
                   "Help Desk for Chapter: '{0}'".format(chapter.name), _heading)
    ws.set_row(0, 42)
    ws.insert_image(0, 0, settings.logofiware, {'x_scale': 0.5, 'y_scale': 0.5, 'x_offset': 0, 'y_offset': 0})
    row += 1
    ws.write(row, 0, 'Project Time:', self.spFormats.bold_right)
    ws.write(row, 1, '{}'.format(agileCalendar.projectTime()))
    ws.write(row, 2, 'Report Date:', self.spFormats.bold_right)
    ws.write(row, 3, date.today().strftime('%d-%m-%Y'))
    row += 1
    ws.write(row, 0, 'Start of Data Analysis:', self.spFormats.bold_right)
    ws.write(row, 1, '{}'.format(agileCalendar.projectTime(current_date=self.start)))
    row += 1
    ws.write(row, 0, 'End of Data Analysis:', self.spFormats.bold_right)
    ws.write(row, 1, '{}'.format(agileCalendar.projectTime(current_date=self.end)))
    row += 2
    _format = self.workbook.add_format({'bold': True, 'font_size': 15, 'color': 'green'})
    ws.write(row, 0, 'Chapter Name:', self.spFormats.bold_right)
    ws.write(row, 1, chapter.Name, _format)
    row += 1
    _format = self.workbook.add_format({'bold': True, 'font_size': 15, 'bg_color': '#60C1CF'})
    ws.write(row, 0, 'Chapter Leader:', self.spFormats.bold_right)
    ws.write(row, 1, chapter.leader, _format)
    ws.write(row, 2, '', _format)
    if chapter.architect:
        row += 1
        ws.write(row, 0, 'Chapter Architect:', self.spFormats.bold_right)
        ws.write(row, 1, chapter.architect, _format)
        ws.write(row, 2, '', _format)
    row += 2
    # Summary block.
    ws.write(row, 0, 'HelpDesk Summary:', self.spFormats.bold_right)
    ws.write(row, 1, '# Items', self.spFormats.bold_left)
    row += 1
    data = deck.issueType
    ws.write(row, 0, 'Composition', self.spFormats.bold_right)
    ws.write(row, 1, '{0:,} Issues = {extRequest} extRequests + '
                     '{Monitor:,} Monitors'.format(sum(data.values()), **data))
    row += 1
    data = deck.status
    ws.write(row, 0, 'Status', self.spFormats.bold_right)
    ws.write(row, 1,
             '{0:,} Issues = {Open} Open + {In Progress} In Progress + {Impeded} Impeded + {Answered} Answered +'
             ' {Closed} Closed'.format(sum(data.values()), **data))
    row += 1
    data = deck.resolution
    ws.write(row, 0, 'Resolved', self.spFormats.bold_right)
    # Build a '{key} key' template for each resolution present, then fill it.
    fields = ' + '.join(['{' + '{0}'.format(item) + '} ' + '{0}'.format(item) for item in data])
    ws.write(row, 1, '{0:,} Issues = '.format(sum(data.values())) + fields.format(**data))
    if len(deck):
        row += 2
        chart = painter.draw_composition(deck.issueType)
        ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
        chart = painter.draw_status(deck.status)
        ws.insert_chart(row, 1, chart, {'x_offset': 250, 'y_offset': 0})
        chart = painter.draw_resolution(deck.resolution)
        ws.insert_chart(row, 1, chart, {'x_offset': 550, 'y_offset': 0})
        row += 17
    # Statistics block.
    ws.write(row, 0, 'HelpDesk Set:', self.spFormats.bold_right)
    ws.write(row, 1, 'Statistics', self.spFormats.bold_left)
    row += 1
    ws.write(row, 0, 'All:', self.spFormats.bold_right)
    self._write_stats(ws, row, reporter.stats)
    if reporter.stats['n'] > 0:
        row += 1
        ws.write(row, 0, 'Last 60 days:', self.spFormats.bold_right)
        self._write_stats(ws, row, reporter.statsOfRecent)
        row += 1
        ws.write(row, 0, 'Pending Issues:', self.spFormats.bold_right)
        self._write_stats(ws, row, reporter.statsOfPending)
    if len(deck):
        row += 1
        chart = painter.draw_resolution_time(reporter.resolutionTime_graph_data)
        ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
        row += 15
        chart = painter.draw_evolution(reporter.evolution_graph_data)
        ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
        row += 15
    row += 1
    _format = self.workbook.add_format({'bold': True, 'font_size': 16, 'bg_color': '#009999'})
    row += 1
    # Per-enabler charts.
    ws.merge_range(xl_range(row, 1, row, 2), 'Enablers Contribution and Service Time', _format)
    row += 1
    chart = painter.draw_enablers_contribution(reporter.enablers)
    ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
    chart = painter.draw_enablers_service_time(reporter.enablers)
    ws.insert_chart(row, 1, chart, {'x_offset': 500, 'y_offset': 0})
def _lab_channel_help_desk(self):
    # Render the overview worksheet for the whole Lab channel: banner,
    # summary, statistics and per-node contribution/service-time charts.
    # NOTE(review): indentation reconstructed from a whitespace-mangled
    # source -- verify nesting against version control.
    print('---> Lab Nodes')
    wb = self.workbook
    ws = wb.add_worksheet('Lab Channel')
    deck = self.deck
    reporter = self.reporter
    painter = Painter(wb, ws)
    ws.set_zoom(80)
    ws.set_column(0, 0, 30)
    ws.set_column(1, 1, 122)
    ws.set_column(2, 5, 20)
    row, col = 0, 0
    # Banner row with logo.
    _heading = self.workbook.add_format({'bold': True, 'font_size': 30,
                                         'bg_color': '#002D67',
                                         'font_color': '#FFE616',
                                         'align': 'center'})
    ws.merge_range(xl_range(row, 0, row, 3),
                   "Help Desk for Technical Chapters", _heading)
    ws.set_row(0, 42)
    ws.insert_image(0, 0, settings.logofiware, {'x_scale': 0.5, 'y_scale': 0.5, 'x_offset': 0, 'y_offset': 0})
    row += 1
    ws.write(row, 0, 'Project Time:', self.spFormats.bold_right)
    ws.write(row, 1, '{}'.format(agileCalendar.projectTime()))
    ws.write(row, 2, 'Report Date:', self.spFormats.bold_right)
    ws.write(row, 3, date.today().strftime('%d-%m-%Y'))
    row += 1
    ws.write(row, 0, 'Start of Data Analysis:', self.spFormats.bold_right)
    ws.write(row, 1, '{}'.format(agileCalendar.projectTime(current_date=self.start)))
    row += 1
    ws.write(row, 0, 'End of Data Analysis:', self.spFormats.bold_right)
    ws.write(row, 1, '{}'.format(agileCalendar.projectTime(current_date=self.end)))
    row += 2
    _format = self.workbook.add_format({'bold': True, 'font_size': 15, 'bg_color': '#60C1CF'})
    ws.write(row, 0, 'Tech Channel Leader:', self.spFormats.bold_right)
    ws.write(row, 1, 'FF - Veronika Vlnkova', _format)
    ws.write(row, 2, '', _format)
    row += 2
    # Summary block.
    ws.write(row, 0, 'Tech Channel Summary', self.spFormats.bold_right)
    ws.write(row, 1, '# Items', self.spFormats.bold_left)
    row += 1
    reporter.deck = deck
    data = reporter.deck.issueType
    ws.write(row, 0, 'Composition', self.spFormats.bold_right)
    ws.write(row, 1, '{0:,} Issues = {extRequest} extRequests + '
                     '{Monitor:,} Monitors'.format(sum(data.values()), **data))
    row += 1
    data = reporter.deck.status
    ws.write(row, 0, 'Status', self.spFormats.bold_right)
    ws.write(row, 1,
             '{0:,} Issues = {Open} Open + {In Progress} In Progress + {Impeded} Impeded + {Answered} Answered +'
             ' {Closed} Closed'.format(sum(data.values()), **data))
    row += 1
    data = reporter.deck.resolution
    ws.write(row, 0, 'Resolved', self.spFormats.bold_right)
    # Build a '{key} key' template for each resolution present, then fill it.
    fields = ' + '.join(['{' + '{0}'.format(item) + '} ' + '{0}'.format(item) for item in data])
    ws.write(row, 1, '{0:,} Issues = '.format(sum(data.values())) + fields.format(**data))
    if len(reporter.deck):
        row += 2
        chart = painter.draw_composition(reporter.deck.issueType)
        ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
        chart = painter.draw_status(reporter.deck.status)
        ws.insert_chart(row, 1, chart, {'x_offset': 250, 'y_offset': 0})
        chart = painter.draw_resolution(reporter.deck.resolution)
        ws.insert_chart(row, 1, chart, {'x_offset': 550, 'y_offset': 0})
        row += 17
    # Statistics block.
    ws.write(row, 0, 'Channel Set:', self.spFormats.bold_right)
    ws.write(row, 1, 'Statistics', self.spFormats.bold_left)
    row += 1
    ws.write(row, 0, 'All:', self.spFormats.bold_right)
    self._write_stats(ws, row, reporter.stats)
    if reporter.stats['n'] > 0:
        row += 1
        ws.write(row, 0, 'Pending Issues:', self.spFormats.bold_right)
        self._write_stats(ws, row, reporter.statsOfPending)
    if len(reporter.deck):
        row += 1
        chart = painter.draw_resolution_time(reporter.resolutionTime_graph_data)
        ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
        row += 15
        chart = painter.draw_evolution(reporter.evolution_graph_data)
        ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
        row += 15
    row += 1
    # Per-node charts.
    _format = self.workbook.add_format({'bold': True, 'font_size': 13, 'bg_color': '#D0E799'})
    ws.merge_range(xl_range(row, 1, row, 2), 'Nodes Contribution and Service Time', _format)
    row += 1
    chart = painter.draw_nodes_contribution(reporter.nodes)
    ws.insert_chart(row, 1, chart, {'x_offset': 0, 'y_offset': 0})
    chart = painter.draw_nodes_service_time(reporter.nodes)
    ws.insert_chart(row, 1, chart, {'x_offset': 500, 'y_offset': 0})
def lab(self):
    """Build the complete Help-Desk Lab report workbook and write it to
    a timestamped .xlsx file under ``settings.outHome``."""
    print("\n--monitor-- Help-Desk Lab nodes:")
    stamp = datetime.now().strftime("%Y%m%d-%H%M")
    myfile = os.path.join(settings.outHome,
                          'FIWARE.helpdesk-lab.report.' + stamp + '.xlsx')
    self.workbook = xlsxwriter.Workbook(myfile)
    self.spFormats = SpreadsheetFormats(self.workbook)
    # Overview sheet first, then one worksheet per Lab node.
    self._lab_channel_help_desk()
    for node_name in helpdeskNodesBook:
        self._node_helpdesk(helpdeskNodesBook[node_name])
    print('Help-Desk Lab nodes report: W:' + myfile)
    self.workbook.close()
class WorkBench:
    """Static entry points dispatched by the interactive menu below."""

    @staticmethod
    def report():
        """Generate the Help-Desk Lab report workbook."""
        print('report')
        HelpDeskLabReporter().lab()

    @staticmethod
    def snapshot():
        """Take a data snapshot into the configured store directory."""
        print('snapshot')
        DataEngine.snapshot(storage=settings.storeHome)

    @staticmethod
    def upload():
        """Upload the generated report."""
        print('upload')
        Uploader().upload('helpdesklab', 'report', settings.chapters)
if __name__ == "__main__":
    # Interactive menu loop: dispatch to WorkBench actions until the user
    # picks 'E' (mapped to the builtin exit()).
    options = {'0': WorkBench.snapshot,
               '1': WorkBench.report,
               '2': WorkBench.upload,
               'E': exit}
    while True:
        menu = '\nMenu:\n\t0: get snapshot\n\t1: create reports \n\t2: upload report\n\tE: Exit'
        choice = input(menu + '\nEnter your choice[0-2,(E)xit] : ')
        print('\nChosen option: {}\n'.format(choice))
        if choice in ('0', '1', '2', 'E'):
            options[choice]()
        else:
            print('\n\n\nWrong option, please try again... ')

# TODO: the period of time should be a parameter to put on the initialization
# of the scripts and the code should work with or without these data.
| 40.331612 | 119 | 0.551113 |
b5678081ceee9c4ade219212a828fe9a02d02676 | 1,027 | py | Python | flask_talisman/__init__.py | eelkevdbos/flask-talisman | a7a36f58c0b2f75ed6c0b5d1dc56fb97dae3414a | [
"Apache-2.0"
] | null | null | null | flask_talisman/__init__.py | eelkevdbos/flask-talisman | a7a36f58c0b2f75ed6c0b5d1dc56fb97dae3414a | [
"Apache-2.0"
] | null | null | null | flask_talisman/__init__.py | eelkevdbos/flask-talisman | a7a36f58c0b2f75ed6c0b5d1dc56fb97dae3414a | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .talisman import (
ALLOW_FROM, DEFAULT_CSP_POLICY, DEFAULT_DOCUMENT_POLICY,
DEFAULT_FEATURE_POLICY, DEFAULT_PERMISSIONS_POLICY, DENY,
GOOGLE_CSP_POLICY, NONCE_LENGTH, SAMEORIGIN, Talisman)
# Public API of flask_talisman; mirrors the names imported from
# .talisman above.
__all__ = (
    'ALLOW_FROM',
    'DEFAULT_CSP_POLICY',
    'DEFAULT_DOCUMENT_POLICY',
    'DEFAULT_FEATURE_POLICY',
    'DEFAULT_PERMISSIONS_POLICY',
    'DENY',
    'GOOGLE_CSP_POLICY',
    'NONCE_LENGTH',
    'SAMEORIGIN',
    'Talisman',
)
| 32.09375 | 74 | 0.742941 |
73969307a5e8d8e1c7e123e57c96d62cc3750ba5 | 98,580 | py | Python | python/pyspark/rdd.py | cchung100m/spark | 1ed1b4d8e1a5b9ca0ec8b15f36542d7a63eebf94 | [
"Apache-2.0"
] | 2 | 2019-03-02T16:08:01.000Z | 2020-11-25T10:10:17.000Z | python/pyspark/rdd.py | cchung100m/spark | 1ed1b4d8e1a5b9ca0ec8b15f36542d7a63eebf94 | [
"Apache-2.0"
] | 1 | 2020-05-22T22:46:36.000Z | 2020-05-29T01:53:42.000Z | python/pyspark/rdd.py | cchung100m/spark | 1ed1b4d8e1a5b9ca0ec8b15f36542d7a63eebf94 | [
"Apache-2.0"
] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.java_gateway import local_connect_and_auth
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
from pyspark.util import fail_on_stopiteration
__all__ = ["RDD"]
class PythonEvalType(object):
    """
    Evaluation type of python rdd.

    These values are internal to PySpark.

    These values should match values in org.apache.spark.api.python.PythonEvalType.
    """
    NON_UDF = 0

    SQL_BATCHED_UDF = 100

    # 2xx values are the pandas-based UDF variants.
    SQL_SCALAR_PANDAS_UDF = 200
    SQL_GROUPED_MAP_PANDAS_UDF = 201
    SQL_GROUPED_AGG_PANDAS_UDF = 202
    SQL_WINDOW_AGG_PANDAS_UDF = 203
def portable_hash(x):
    """
    Return a hash code that is consistent across runs for builtin types,
    in particular ``None`` and tuples that may contain ``None``.

    Tuple hashing mirrors the algorithm used by CPython 2.7.

    >>> portable_hash(None)
    0
    >>> portable_hash((None, 1)) & 0xffffffff
    219750521
    """
    # String hash randomization would make results differ between runs.
    if sys.version_info >= (3, 2, 3) and 'PYTHONHASHSEED' not in os.environ:
        raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")

    if x is None:
        return 0
    if not isinstance(x, tuple):
        return hash(x)
    # CPython-2.7-style tuple hash: mix each element's portable hash into
    # the accumulator, then fold in the tuple length.
    acc = 0x345678
    for element in x:
        acc = ((acc ^ portable_hash(element)) * 1000003) & sys.maxsize
    acc ^= len(x)
    return int(acc) if acc != -1 else -2
class BoundedFloat(float):
    """
    A float produced by an approximate job: the mean value, annotated with
    the confidence level and the low/high bounds of the estimate.

    >>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
    100.0
    """
    def __new__(cls, mean, confidence, low, high):
        instance = super(BoundedFloat, cls).__new__(cls, mean)
        instance.confidence = confidence
        instance.low = low
        instance.high = high
        return instance
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MiB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(sock_info, serializer):
    # Connect to the server described by `sock_info` (forwarded to
    # local_connect_and_auth) and return the serializer's stream of
    # deserialized objects read from that socket.
    (sockfile, sock) = local_connect_and_auth(*sock_info)
    # The RDD materialization time is unpredicable, if we set a timeout for socket reading
    # operation, it will very possibly fail. See SPARK-18281.
    sock.settimeout(None)
    # The socket will be automatically closed when garbage-collected.
    return serializer.load_stream(sockfile)
def ignore_unicode_prefix(f):
    """
    Rewrite *f*'s docstring so that doctest examples written with unicode
    ``u'...'`` prefixes pass on both Python 2 and Python 3 (Python 3 reprs
    unicode strings without the prefix). Returns *f* itself.
    """
    if sys.version >= '3':
        # Drop the 'u'/'U' immediately preceding a single quote.
        prefix_pattern = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
        f.__doc__ = prefix_pattern.sub(r'\1\2', f.__doc__)
    return f
class Partitioner(object):
    """Maps keys to partition ids with a partition function, modulo the
    number of partitions."""

    def __init__(self, numPartitions, partitionFunc):
        self.numPartitions = numPartitions
        self.partitionFunc = partitionFunc

    def __eq__(self, other):
        # Two partitioners are interchangeable iff they have the same
        # partition count and the same partition function.
        if not isinstance(other, Partitioner):
            return False
        return (self.numPartitions == other.numPartitions
                and self.partitionFunc == other.partitionFunc)

    def __call__(self, k):
        return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
    # Wraps a JVM-side RDD handle together with the SparkContext that owns it
    # and the serializer used to decode its elements on the Python side.
    self._jrdd = jrdd
    self.is_cached = False
    self.is_checkpointed = False
    self.ctx = ctx
    self._jrdd_deserializer = jrdd_deserializer
    self._id = jrdd.id()
    # Only set by partitioning-aware operations (e.g. partitionBy); None
    # means the partitioning is unknown.
    self.partitioner = None
def _pickled(self):
    # Return a copy of this RDD whose elements are re-serialized with the
    # default batched pickle serializer.
    return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
    """
    A unique ID for this RDD (within its SparkContext).
    """
    # Cached from the JVM RDD at construction time.
    return self._id
def __repr__(self):
    # Delegate to the JVM RDD's toString (e.g. "PythonRDD[1] at ...").
    return self._jrdd.toString()
def __getnewargs__(self):
    # This method is called when attempting to pickle an RDD, which is always an error:
    # raising here (rather than failing obscurely on the worker) gives the
    # user a clear explanation of why RDDs cannot be captured in closures.
    raise Exception(
        "It appears that you are attempting to broadcast an RDD or reference an RDD from an "
        "action or transformation. RDD transformations and actions can only be invoked by the "
        "driver, not inside of other transformations; for example, "
        "rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
        "transformation and count action cannot be performed inside of the rdd1.map "
        "transformation. For more information, see SPARK-5063."
    )
@property
def context(self):
    """
    The L{SparkContext} that this RDD was created on.
    """
    # Read-only alias for the ctx attribute set in __init__.
    return self.ctx
def cache(self):
    """
    Persist this RDD with the default storage level (C{MEMORY_ONLY}).
    """
    # persist() also sets is_cached, so delegating is sufficient.
    self.persist(StorageLevel.MEMORY_ONLY)
    return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
    """
    Set this RDD's storage level to persist its values across operations
    after the first time it is computed. This can only be used to assign
    a new storage level if the RDD does not have a storage level set yet.
    If no storage level is specified defaults to (C{MEMORY_ONLY}).

    >>> rdd = sc.parallelize(["b", "a", "c"])
    >>> rdd.persist().is_cached
    True
    """
    self.is_cached = True
    # Translate the Python StorageLevel into its JVM counterpart before
    # handing it to the Java RDD.
    javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
    self._jrdd.persist(javaStorageLevel)
    return self
def unpersist(self):
    """
    Mark the RDD as non-persistent, and remove all blocks for it from
    memory and disk.
    """
    # The Python-side flag and the JVM-side state are independent;
    # update both.
    self._jrdd.unpersist()
    self.is_cached = False
    return self
def checkpoint(self):
    """
    Mark this RDD for checkpointing. It will be saved to a file inside the
    checkpoint directory set with L{SparkContext.setCheckpointDir()} and
    all references to its parent RDDs will be removed. This function must
    be called before any job has been executed on this RDD. It is strongly
    recommended that this RDD is persisted in memory, otherwise saving it
    on a file will require recomputation.
    """
    # Only marks the RDD; the actual save happens when a job runs on it.
    self.is_checkpointed = True
    self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
    """
    Return whether this RDD is checkpointed and materialized, either reliably or locally.
    """
    # Queries the JVM-side state, not the Python-side is_checkpointed flag.
    return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self):
    """
    Mark this RDD for local checkpointing using Spark's existing caching layer.

    This method is for users who wish to truncate RDD lineages while skipping the expensive
    step of replicating the materialized data in a reliable distributed file system. This is
    useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).

    Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
    data is written to ephemeral local storage in the executors instead of to a reliable,
    fault-tolerant storage. The effect is that if an executor fails during the computation,
    the checkpointed data may no longer be accessible, causing an irrecoverable job failure.

    This is NOT safe to use with dynamic allocation, which removes executors along
    with their cached blocks. If you must use both features, you are advised to set
    L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value.

    The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used.
    """
    # Delegates entirely to the JVM RDD; no Python-side state is updated.
    self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
    """
    Return whether this RDD is marked for local checkpointing.

    Exposed for testing.
    """
    return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
    """
    Gets the name of the file to which this RDD was checkpointed.

    Not defined if RDD is checkpointed locally.
    """
    # The JVM side returns a Scala Option; unwrap it to a path or None.
    maybe_file = self._jrdd.rdd().getCheckpointFile()
    return maybe_file.get() if maybe_file.isDefined() else None
def map(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each element of this RDD.

    >>> rdd = sc.parallelize(["b", "a", "c"])
    >>> sorted(rdd.map(lambda x: (x, 1)).collect())
    [('a', 1), ('b', 1), ('c', 1)]
    """
    # Wrap f so a StopIteration raised inside it becomes a RuntimeError
    # instead of silently truncating the partition.
    wrapped = fail_on_stopiteration(f)

    def apply_elementwise(split_index, iterator):
        # The partition index is irrelevant for an element-wise map.
        return map(wrapped, iterator)

    return self.mapPartitionsWithIndex(apply_elementwise, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
    """
    Return a new RDD by first applying a function to all elements of this
    RDD, and then flattening the results.

    >>> rdd = sc.parallelize([2, 3, 4])
    >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
    [1, 1, 1, 2, 2, 3]
    >>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
    [(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
    """
    wrapped = fail_on_stopiteration(f)

    def expand_partition(split_index, iterator):
        # Lazily yield every item of every iterable produced by f.
        for element in iterator:
            for produced in wrapped(element):
                yield produced

    return self.mapPartitionsWithIndex(expand_partition, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each partition of this RDD.

    >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
    >>> def f(iterator): yield sum(iterator)
    >>> rdd.mapPartitions(f).collect()
    [3, 7]
    """
    # Drop the partition index supplied by mapPartitionsWithIndex.
    return self.mapPartitionsWithIndex(
        lambda _, iterator: f(iterator), preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each partition of this RDD,
    while tracking the index of the original partition.

    >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
    >>> def f(splitIndex, iterator): yield splitIndex
    >>> rdd.mapPartitionsWithIndex(f).sum()
    6
    """
    # PipelinedRDD chains Python functions so consecutive narrow
    # transformations run in a single pass over each partition.
    return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
    """
    Deprecated: use mapPartitionsWithIndex instead.

    Return a new RDD by applying a function to each partition of this RDD,
    while tracking the index of the original partition.

    >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
    >>> def f(splitIndex, iterator): yield splitIndex
    >>> rdd.mapPartitionsWithSplit(f).sum()
    6
    """
    # Kept only for backward compatibility; warn and forward.
    warnings.warn("mapPartitionsWithSplit is deprecated; "
                  "use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
    return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
    """
    Returns the number of partitions in RDD

    >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
    >>> rdd.getNumPartitions()
    2
    """
    # Size of the JVM-side partition array.
    return self._jrdd.partitions().size()
def filter(self, f):
    """
    Return a new RDD containing only the elements that satisfy a predicate.

    >>> rdd = sc.parallelize([1, 2, 3, 4, 5])
    >>> rdd.filter(lambda x: x % 2 == 0).collect()
    [2, 4]
    """
    predicate = fail_on_stopiteration(f)

    def keep_matching(iterator):
        # Lazily keep elements for which the predicate is truthy.
        return (element for element in iterator if predicate(element))

    # Filtering never moves data between partitions, so partitioning holds.
    return self.mapPartitions(keep_matching, True)
def distinct(self, numPartitions=None):
    """
    Return a new RDD containing the distinct elements in this RDD.

    >>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
    [1, 2, 3]
    """
    # Deduplicate by keying each element, collapsing duplicate keys in a
    # shuffle, then dropping the dummy values again.
    keyed = self.map(lambda element: (element, None))
    deduplicated = keyed.reduceByKey(lambda value, _: value, numPartitions)
    return deduplicated.map(lambda pair: pair[0])
def sample(self, withReplacement, fraction, seed=None):
    """
    Return a sampled subset of this RDD.

    :param withReplacement: can elements be sampled multiple times (replaced when sampled out)
    :param fraction: expected size of the sample as a fraction of this RDD's size
        without replacement: probability that each element is chosen; fraction must be [0, 1]
        with replacement: expected number of times each element is chosen; fraction must be >= 0
    :param seed: seed for the random number generator

    .. note:: This is not guaranteed to provide exactly the fraction specified of the total
        count of the given :class:`DataFrame`.

    >>> rdd = sc.parallelize(range(100), 4)
    >>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
    True
    """
    assert fraction >= 0.0, "Negative fraction value: %s" % fraction
    # RDDSampler draws per-partition, so no data is collected to the driver.
    return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
    """
    Randomly splits this RDD with the provided weights.

    :param weights: weights for splits, will be normalized if they don't sum to 1
    :param seed: random seed
    :return: split RDDs in a list

    >>> rdd = sc.parallelize(range(500), 1)
    >>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
    >>> len(rdd1.collect() + rdd2.collect())
    500
    >>> 150 < rdd1.count() < 250
    True
    >>> 250 < rdd2.count() < 350
    True
    """
    # Build the cumulative distribution of the normalized weights,
    # e.g. [2, 3] -> [0.0, 0.4, 1.0].
    s = float(sum(weights))
    cweights = [0.0]
    for w in weights:
        cweights.append(cweights[-1] + w / s)
    if seed is None:
        seed = random.randint(0, 2 ** 32 - 1)
    # Each split samples the same RDD with a disjoint [lb, ub) range and a
    # shared seed, so together the splits partition the data.
    return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
            for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
    """
    Return a fixed-size sampled subset of this RDD.

    .. note:: This method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.

    >>> rdd = sc.parallelize(range(0, 10))
    >>> len(rdd.takeSample(True, 20, 1))
    20
    >>> len(rdd.takeSample(False, 5, 2))
    5
    >>> len(rdd.takeSample(False, 15, 3))
    10
    """
    numStDev = 10.0

    if num < 0:
        raise ValueError("Sample size cannot be negative.")
    elif num == 0:
        return []

    initialCount = self.count()
    if initialCount == 0:
        return []

    rand = random.Random(seed)

    if (not withReplacement) and num >= initialCount:
        # shuffle current RDD and return
        samples = self.collect()
        rand.shuffle(samples)
        return samples

    # Cap the sample size so the oversampling margin below cannot overflow.
    maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
    if num > maxSampleSize:
        raise ValueError(
            "Sample size cannot be greater than %d." % maxSampleSize)

    # Oversample so one pass usually yields at least `num` elements.
    fraction = RDD._computeFractionForSampleSize(
        num, initialCount, withReplacement)
    samples = self.sample(withReplacement, fraction, seed).collect()

    # If the first sample didn't turn out large enough, keep trying to take samples;
    # this shouldn't happen often because we use a big multiplier for their initial size.
    # See: scala/spark/RDD.scala
    while len(samples) < num:
        # TODO: add log warning for when more than one iteration was run
        seed = rand.randint(0, sys.maxsize)
        samples = self.sample(withReplacement, fraction, seed).collect()

    # Shuffle so the returned prefix is an unbiased sample of the sample.
    rand.shuffle(samples)

    return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
    """
    Returns a sampling rate that guarantees a sample of
    size >= sampleSizeLowerBound 99.99% of the time.

    How the sampling rate is determined:

    Let p = num / total, where num is the sample size and total is the
    total number of data points in the RDD. We're trying to compute
    q > p such that
      - when sampling with replacement, we're drawing each data point
        with prob_i ~ Pois(q), where we want to guarantee
        Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
        total), i.e. the failure rate of not having a sufficiently large
        sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
        to guarantee 0.9999 success rate for num > 12, but we need a
        slightly larger q (9 empirically determined).
      - when sampling without replacement, we're drawing each data point
        with prob_i ~ Binomial(total, fraction) and our choice of q
        guarantees 1-delta, or 0.9999 success rate, where success rate is
        defined the same as in sampling with replacement.
    """
    fraction = float(sampleSizeLowerBound) / total
    if withReplacement:
        # Small requested samples need the larger, empirically determined
        # padding multiplier.
        numStDev = 9 if sampleSizeLowerBound < 12 else 5
        return fraction + numStDev * sqrt(fraction / total)
    delta = 0.00005
    gamma = - log(delta) / total
    return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
def union(self, other):
    """
    Return the union of this RDD and another one.

    >>> rdd = sc.parallelize([1, 1, 2, 3])
    >>> rdd.union(rdd).collect()
    [1, 1, 2, 3, 1, 1, 2, 3]
    """
    if self._jrdd_deserializer == other._jrdd_deserializer:
        rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
                  self._jrdd_deserializer)
    else:
        # These RDDs contain data in different serialized formats, so we
        # must normalize them to the default serializer.
        self_copy = self._reserialize()
        other_copy = other._reserialize()
        rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
                  self.ctx.serializer)
    # Preserve the partitioner only when both inputs agree on it and the
    # union did not change the partition count.
    if (self.partitioner == other.partitioner and
            self.getNumPartitions() == rdd.getNumPartitions()):
        rdd.partitioner = self.partitioner
    return rdd
def intersection(self, other):
    """
    Return the intersection of this RDD and another one. The output will
    not contain any duplicate elements, even if the input RDDs did.

    .. note:: This method performs a shuffle internally.

    >>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
    >>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
    >>> rdd1.intersection(rdd2).collect()
    [1, 2, 3]
    """
    def to_pair(value):
        return (value, None)

    # Cogroup both sides on the element itself; a key is in the
    # intersection iff both grouped iterables are non-empty.
    grouped = self.map(to_pair).cogroup(other.map(to_pair))
    in_both = grouped.filter(lambda kv: all(kv[1]))
    return in_both.keys()
def _reserialize(self, serializer=None):
    # Return an RDD whose elements are encoded with `serializer`
    # (defaulting to the context serializer). If the format already
    # matches, this is the identity.
    serializer = serializer or self.ctx.serializer
    if self._jrdd_deserializer != serializer:
        # Rebind `self` to an identity-mapped copy and tag it with the new
        # serializer; the map forces re-encoding when the RDD is computed.
        self = self.map(lambda x: x, preservesPartitioning=True)
        self._jrdd_deserializer = serializer
    return self
def __add__(self, other):
    """
    Return the union of this RDD and another one.

    :param other: another RDD; any other operand raises TypeError.

    >>> rdd = sc.parallelize([1, 1, 2, 3])
    >>> (rdd + rdd).collect()
    [1, 1, 2, 3, 1, 1, 2, 3]
    """
    if not isinstance(other, RDD):
        # Same exception type as before, but with an actionable message
        # instead of a bare TypeError.
        raise TypeError(
            "can only concatenate an RDD with another RDD, not %s"
            % type(other).__name__)
    return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
                                       ascending=True, keyfunc=lambda x: x):
    """
    Repartition the RDD according to the given partitioner and, within each resulting partition,
    sort records by their keys.

    >>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
    >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
    >>> rdd2.glom().collect()
    [[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()

    # Budget for the external (spilling) sorter, from Spark config (MiB).
    memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
    serializer = self._jrdd_deserializer

    def sortPartition(iterator):
        # ExternalSorter spills to disk if a partition exceeds the budget.
        sort = ExternalSorter(memory * 0.9, serializer).sorted
        return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))

    return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
    """
    Sorts this RDD, which is assumed to consist of (key, value) pairs.

    >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
    >>> sc.parallelize(tmp).sortByKey().first()
    ('1', 3)
    >>> sc.parallelize(tmp).sortByKey(True, 1).collect()
    [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
    >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
    [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
    >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
    >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
    >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
    [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()

    memory = self._memory_limit()
    serializer = self._jrdd_deserializer

    def sortPartition(iterator):
        # External (spilling) sort of one partition by the derived key.
        sort = ExternalSorter(memory * 0.9, serializer).sorted
        return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))

    if numPartitions == 1:
        # With a single output partition no range partitioning is needed;
        # just coalesce and sort locally.
        if self.getNumPartitions() > 1:
            self = self.coalesce(1)
        return self.mapPartitions(sortPartition, True)

    # first compute the boundary of each part via sampling: we want to partition
    # the key-space into bins such that the bins have roughly the same
    # number of (key, value) pairs falling into them
    rddSize = self.count()
    if not rddSize:
        return self  # empty RDD
    maxSampleSize = numPartitions * 20.0  # constant from Spark's RangePartitioner
    fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
    samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
    samples = sorted(samples, key=keyfunc)

    # we have numPartitions many parts but one of the them has
    # an implicit boundary
    bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
              for i in range(0, numPartitions - 1)]

    def rangePartitioner(k):
        # Route each key to the bin whose boundary range contains it;
        # descending order reverses the bin index.
        p = bisect.bisect_left(bounds, keyfunc(k))
        if ascending:
            return p
        else:
            return numPartitions - 1 - p

    return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
    """
    Sorts this RDD by the given keyfunc

    >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
    >>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
    [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
    >>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
    [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
    """
    # Key each element, sort by that key, then strip the key again.
    keyed = self.keyBy(keyfunc)
    ordered = keyed.sortByKey(ascending, numPartitions)
    return ordered.values()
def glom(self):
    """
    Return an RDD created by coalescing all elements within each partition
    into a list.

    >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
    >>> sorted(rdd.glom().collect())
    [[1, 2], [3, 4]]
    """
    # Each partition becomes a single one-element output: its full list.
    return self.mapPartitions(lambda iterator: [list(iterator)])
def cartesian(self, other):
    """
    Return the Cartesian product of this RDD and another one, that is, the
    RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
    C{b} is in C{other}.

    >>> rdd = sc.parallelize([1, 2])
    >>> sorted(rdd.cartesian(rdd).collect())
    [(1, 1), (1, 2), (2, 1), (2, 2)]
    """
    # Due to batching, we can't use the Java cartesian method.
    # CartesianDeserializer decodes the paired (left, right) stream layout.
    deserializer = CartesianDeserializer(self._jrdd_deserializer,
                                         other._jrdd_deserializer)
    return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
    """
    Return an RDD of grouped items.

    >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
    >>> result = rdd.groupBy(lambda x: x % 2).collect()
    >>> sorted([(x, sorted(y)) for (x, y) in result])
    [(0, [2, 8]), (1, [1, 1, 3, 5])]
    """
    # Pair each element with its derived key, then group on the key.
    keyed = self.map(lambda element: (f(element), element))
    return keyed.groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
    """
    Return an RDD created by piping elements to a forked external process.

    >>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
    [u'1', u'2', u'', u'3']

    :param checkCode: whether or not to check the return value of the shell command.
    """
    if env is None:
        env = dict()

    def func(iterator):
        # One external process per partition; stdin is fed from a
        # background thread so reading stdout here cannot deadlock.
        pipe = Popen(
            shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)

        def pipe_objs(out):
            # NOTE(review): `unicode` is the Python 2 name — presumably
            # aliased to str elsewhere in this module for Python 3; verify.
            for obj in iterator:
                s = unicode(obj).rstrip('\n') + '\n'
                out.write(s.encode('utf-8'))
            out.close()
        Thread(target=pipe_objs, args=[pipe.stdin]).start()

        def check_return_code():
            # Empty generator that, once stdout is exhausted, waits for the
            # process and optionally raises on a non-zero exit code.
            pipe.wait()
            if checkCode and pipe.returncode:
                raise Exception("Pipe function `%s' exited "
                                "with error code %d" % (command, pipe.returncode))
            else:
                for i in range(0):
                    yield i
        return (x.rstrip(b'\n').decode('utf-8') for x in
                chain(iter(pipe.stdout.readline, b''), check_return_code()))
    return self.mapPartitions(func)
def foreach(self, f):
    """
    Applies a function to all elements of this RDD.

    >>> def f(x): print(x)
    >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
    """
    safe_f = fail_on_stopiteration(f)

    def run_partition(iterator):
        # Invoke purely for side effects; produce no output elements.
        for element in iterator:
            safe_f(element)
        return iter([])

    self.mapPartitions(run_partition).count()  # Force evaluation
def foreachPartition(self, f):
    """
    Applies a function to each partition of this RDD.

    >>> def f(iterator):
    ...     for x in iterator:
    ...         print(x)
    >>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
    """
    def func(it):
        r = f(it)
        try:
            # Drain whatever f returned so lazy side effects still run.
            return iter(r)
        except TypeError:
            # f returned a non-iterable (typically None) — nothing to drain.
            return iter([])
    self.mapPartitions(func).count()  # Force evaluation
def collect(self):
    """
    Return a list that contains all of the elements in this RDD.

    .. note:: This method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.
    """
    # Attach the driver call site, run the job on the JVM, then stream the
    # results back over a local socket.
    with SCCallSiteSync(self.context) as css:
        sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
    return list(_load_from_socket(sock_info, self._jrdd_deserializer))
def reduce(self, f):
    """
    Reduces the elements of this RDD using the specified commutative and
    associative binary operator. Currently reduces partitions locally.

    >>> from operator import add
    >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
    15
    >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
    10
    >>> sc.parallelize([]).reduce(add)
    Traceback (most recent call last):
        ...
    ValueError: Can not reduce() empty RDD
    """
    f = fail_on_stopiteration(f)

    def func(iterator):
        iterator = iter(iterator)
        try:
            initial = next(iterator)
        except StopIteration:
            # Empty partition: contribute nothing to the driver-side reduce.
            return
        yield reduce(f, iterator, initial)

    # Reduce each partition to one value, then fold those on the driver.
    vals = self.mapPartitions(func).collect()
    if vals:
        return reduce(f, vals)
    raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
    """
    Reduces the elements of this RDD in a multi-level tree pattern.

    :param depth: suggested depth of the tree (default: 2)

    >>> add = lambda x, y: x + y
    >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
    >>> rdd.treeReduce(add)
    -5
    >>> rdd.treeReduce(add, 1)
    -5
    >>> rdd.treeReduce(add, 2)
    -5
    >>> rdd.treeReduce(add, 5)
    -5
    >>> rdd.treeReduce(add, 10)
    -5
    """
    if depth < 1:
        raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)

    # treeAggregate needs a zero value, but reduce has none; tag every
    # element with False and use (None, True) as an ignorable dummy zero.
    zeroValue = None, True  # Use the second entry to indicate whether this is a dummy value.

    def op(x, y):
        if x[1]:
            return y
        elif y[1]:
            return x
        else:
            return f(x[0], y[0]), False

    reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
    if reduced[1]:
        # Only dummies survived, i.e. there were no real elements.
        raise ValueError("Cannot reduce empty RDD.")
    return reduced[0]
def fold(self, zeroValue, op):
    """
    Aggregate the elements of each partition, and then the results for all
    the partitions, using a given associative function and a neutral "zero value."

    The function C{op(t1, t2)} is allowed to modify C{t1} and return it
    as its result value to avoid object allocation; however, it should not
    modify C{t2}.

    This behaves somewhat differently from fold operations implemented
    for non-distributed collections in functional languages like Scala.
    This fold operation may be applied to partitions individually, and then
    fold those results into the final result, rather than apply the fold
    to each element sequentially in some defined ordering. For functions
    that are not commutative, the result may differ from that of a fold
    applied to a non-distributed collection.

    >>> from operator import add
    >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
    15
    """
    op = fail_on_stopiteration(op)

    def func(iterator):
        acc = zeroValue
        for obj in iterator:
            acc = op(acc, obj)
        yield acc
    # collecting result of mapPartitions here ensures that the copy of
    # zeroValue provided to each partition is unique from the one provided
    # to the final reduce call
    vals = self.mapPartitions(func).collect()
    return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
    """
    Aggregate the elements of each partition, and then the results for all
    the partitions, using a given combine functions and a neutral "zero
    value."

    The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
    as its result value to avoid object allocation; however, it should not
    modify C{t2}.

    The first function (seqOp) can return a different result type, U, than
    the type of this RDD. Thus, we need one operation for merging a T into
    an U and one operation for merging two U

    >>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
    >>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
    >>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
    (10, 4)
    >>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
    (0, 0)
    """
    seqOp = fail_on_stopiteration(seqOp)
    combOp = fail_on_stopiteration(combOp)

    def func(iterator):
        # Fold one partition's elements into an accumulator with seqOp.
        acc = zeroValue
        for obj in iterator:
            acc = seqOp(acc, obj)
        yield acc
    # collecting result of mapPartitions here ensures that the copy of
    # zeroValue provided to each partition is unique from the one provided
    # to the final reduce call
    vals = self.mapPartitions(func).collect()
    return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
    """
    Aggregates the elements of this RDD in a multi-level tree
    pattern.

    :param depth: suggested depth of the tree (default: 2)

    >>> add = lambda x, y: x + y
    >>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
    >>> rdd.treeAggregate(0, add, add)
    -5
    >>> rdd.treeAggregate(0, add, add, 1)
    -5
    >>> rdd.treeAggregate(0, add, add, 2)
    -5
    >>> rdd.treeAggregate(0, add, add, 5)
    -5
    >>> rdd.treeAggregate(0, add, add, 10)
    -5
    """
    if depth < 1:
        raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)

    if self.getNumPartitions() == 0:
        return zeroValue

    def aggregatePartition(iterator):
        acc = zeroValue
        for obj in iterator:
            acc = seqOp(acc, obj)
        yield acc

    partiallyAggregated = self.mapPartitions(aggregatePartition)
    numPartitions = partiallyAggregated.getNumPartitions()
    # Fan-in factor per tree level, derived from the requested depth.
    scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
    # If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
    # aggregation.
    while numPartitions > scale + numPartitions / scale:
        numPartitions /= scale
        curNumPartitions = int(numPartitions)

        def mapPartition(i, iterator):
            # Spread partition i's partial results across the smaller
            # partition count for the next level of combining.
            for obj in iterator:
                yield (i % curNumPartitions, obj)

        partiallyAggregated = partiallyAggregated \
            .mapPartitionsWithIndex(mapPartition) \
            .reduceByKey(combOp, curNumPartitions) \
            .values()

    return partiallyAggregated.reduce(combOp)
def max(self, key=None):
    """
    Find the maximum item in this RDD.

    :param key: A function used to generate key for comparing

    >>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
    >>> rdd.max()
    43.0
    >>> rdd.max(key=str)
    5.0
    """
    # Use the builtin max directly when no key is given; otherwise wrap it
    # so the key function is applied during each pairwise comparison.
    comparator = max if key is None else (lambda a, b: max(a, b, key=key))
    return self.reduce(comparator)
def min(self, key=None):
    """
    Find the minimum item in this RDD.

    :param key: A function used to generate key for comparing

    >>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
    >>> rdd.min()
    2.0
    >>> rdd.min(key=str)
    10.0
    """
    # Use the builtin min directly when no key is given; otherwise wrap it
    # so the key function is applied during each pairwise comparison.
    comparator = min if key is None else (lambda a, b: min(a, b, key=key))
    return self.reduce(comparator)
def sum(self):
    """
    Add up the elements in this RDD.

    >>> sc.parallelize([1.0, 2.0, 3.0]).sum()
    6.0
    """
    # Sum each partition locally, then fold the per-partition sums.
    partition_sums = self.mapPartitions(lambda iterator: [sum(iterator)])
    return partition_sums.fold(0, operator.add)
def count(self):
    """
    Return the number of elements in this RDD.

    >>> sc.parallelize([2, 3, 4]).count()
    3
    """
    def partition_size(iterator):
        # Count without materializing the partition.
        return [sum(1 for _ in iterator)]

    return self.mapPartitions(partition_size).sum()
def stats(self):
    """
    Return a L{StatCounter} object that captures the mean, variance
    and count of the RDD's elements in one operation.
    """
    # Build one StatCounter per partition, then merge them pairwise.
    per_partition = self.mapPartitions(lambda iterator: [StatCounter(iterator)])
    return per_partition.reduce(lambda left, right: left.mergeStats(right))
def histogram(self, buckets):
    """
    Compute a histogram using the provided buckets. The buckets
    are all open to the right except for the last which is closed.
    e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
    which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
    and 50 we would have a histogram of 1,0,1.

    If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
    this can be switched from an O(log n) inseration to O(1) per
    element (where n is the number of buckets).

    Buckets must be sorted, not contain any duplicates, and have
    at least two elements.

    If `buckets` is a number, it will generate buckets which are
    evenly spaced between the minimum and maximum of the RDD. For
    example, if the min value is 0 and the max is 100, given `buckets`
    as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
    be at least 1. An exception is raised if the RDD contains infinity.
    If the elements in the RDD do not vary (max == min), a single bucket
    will be used.

    The return value is a tuple of buckets and histogram.

    >>> rdd = sc.parallelize(range(51))
    >>> rdd.histogram(2)
    ([0, 25, 50], [25, 26])
    >>> rdd.histogram([0, 5, 25, 50])
    ([0, 5, 25, 50], [5, 20, 26])
    >>> rdd.histogram([0, 15, 30, 45, 60])  # evenly spaced buckets
    ([0, 15, 30, 45, 60], [15, 15, 15, 6])
    >>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
    >>> rdd.histogram(("a", "b", "c"))
    (('a', 'b', 'c'), [2, 2])
    """
    if isinstance(buckets, int):
        if buckets < 1:
            raise ValueError("number of buckets must be >= 1")

        # filter out non-comparable elements
        def comparable(x):
            if x is None:
                return False
            if type(x) is float and isnan(x):
                return False
            return True

        filtered = self.filter(comparable)

        # faster than stats()
        def minmax(a, b):
            return min(a[0], b[0]), max(a[1], b[1])
        try:
            minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
        except TypeError as e:
            # reduce() raises "Can not reduce() empty RDD"; translate it.
            if " empty " in str(e):
                raise ValueError("can not generate buckets from empty RDD")
            raise

        if minv == maxv or buckets == 1:
            return [minv, maxv], [filtered.count()]

        try:
            inc = (maxv - minv) / buckets
        except TypeError:
            raise TypeError("Can not generate buckets with non-number in RDD")

        if isinf(inc):
            raise ValueError("Can not generate buckets with infinite value")

        # keep them as integer if possible
        inc = int(inc)
        if inc * buckets != maxv - minv:
            # Integer spacing does not divide the range evenly; fall back
            # to float spacing.
            inc = (maxv - minv) * 1.0 / buckets

        buckets = [i * inc + minv for i in range(buckets)]
        buckets.append(maxv)  # fix accumulated error
        even = True

    elif isinstance(buckets, (list, tuple)):
        if len(buckets) < 2:
            raise ValueError("buckets should have more than one value")

        if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
            raise ValueError("can not have None or NaN in buckets")

        if sorted(buckets) != list(buckets):
            raise ValueError("buckets should be sorted")

        if len(set(buckets)) != len(buckets):
            raise ValueError("buckets should not contain duplicated values")

        minv = buckets[0]
        maxv = buckets[-1]
        even = False
        inc = None
        try:
            steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
        except TypeError:
            pass  # objects in buckets do not support '-'
        else:
            if max(steps) - min(steps) < 1e-10:  # handle precision errors
                # Evenly spaced user buckets enable O(1) bucket lookup.
                even = True
                inc = (maxv - minv) / (len(buckets) - 1)

    else:
        raise TypeError("buckets should be a list or tuple or number(int or long)")

    def histogram(iterator):
        counters = [0] * len(buckets)
        for i in iterator:
            if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
                continue
            # O(1) index for even buckets, binary search otherwise.
            t = (int((i - minv) / inc) if even
                 else bisect.bisect_right(buckets, i) - 1)
            counters[t] += 1
        # add last two together
        last = counters.pop()
        counters[-1] += last
        return [counters]

    def mergeCounters(a, b):
        return [i + j for i, j in zip(a, b)]

    return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
def mean(self):
    """
    Compute the mean of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).mean()
    2.0
    """
    # Delegates to the StatCounter built by stats().
    return self.stats().mean()
def variance(self):
    """
    Compute the variance of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).variance()
    0.666...
    """
    # Delegates to the StatCounter built by stats().
    return self.stats().variance()
def stdev(self):
    """
    Compute the standard deviation of this RDD's elements.

    >>> sc.parallelize([1, 2, 3]).stdev()
    0.816...
    """
    # Delegates to the StatCounter built by stats().
    return self.stats().stdev()
def sampleStdev(self):
    """
    Compute the sample standard deviation of this RDD's elements (which
    corrects for bias in estimating the standard deviation by dividing by
    N-1 instead of N).

    >>> sc.parallelize([1, 2, 3]).sampleStdev()
    1.0
    """
    # Delegates to the StatCounter built by stats().
    return self.stats().sampleStdev()
def sampleVariance(self):
    """
    Compute the sample variance of this RDD's elements (which corrects
    for bias in estimating the variance by dividing by N-1 instead of N).

    >>> sc.parallelize([1, 2, 3]).sampleVariance()
    1.0
    """
    # Delegates to the StatCounter built by stats().
    return self.stats().sampleVariance()
def countByValue(self):
    """
    Return the count of each unique value in this RDD as a dictionary of
    (value, count) pairs.

    >>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
    [(1, 2), (2, 3)]
    """
    def tally_partition(iterator):
        # Count locally within each partition so only small dicts travel.
        tally = defaultdict(int)
        for item in iterator:
            tally[item] += 1
        yield tally

    def fold_tallies(left, right):
        # Accumulate right-hand counts into the left-hand defaultdict.
        for value, count in right.items():
            left[value] += count
        return left

    return self.mapPartitions(tally_partition).reduce(fold_tallies)
def top(self, num, key=None):
    """
    Get the top N elements from an RDD.

    .. note:: This method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.

    .. note:: It returns the list sorted in descending order.

    >>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
    [12]
    >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
    [6, 5]
    >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
    [4, 3, 2]
    """
    def largest_in_partition(iterator):
        # Each partition contributes at most `num` candidates.
        yield heapq.nlargest(num, iterator, key=key)

    def combine(lhs, rhs):
        return heapq.nlargest(num, lhs + rhs, key=key)

    return self.mapPartitions(largest_in_partition).reduce(combine)
def takeOrdered(self, num, key=None):
    """
    Get the N elements from an RDD ordered in ascending order or as
    specified by the optional key function.

    .. note:: this method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.

    >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
    [1, 2, 3, 4, 5, 6]
    >>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
    [10, 9, 7, 6, 5, 4]
    """
    def smallest_in_partition(iterator):
        # Pre-filter each partition down to its `num` smallest elements.
        return [heapq.nsmallest(num, iterator, key)]

    def combine(lhs, rhs):
        return heapq.nsmallest(num, lhs + rhs, key)

    return self.mapPartitions(smallest_in_partition).reduce(combine)
def take(self, num):
    """
    Take the first num elements of the RDD.

    It works by first scanning one partition, and use the results from
    that partition to estimate the number of additional partitions needed
    to satisfy the limit.

    Translated from the Scala implementation in RDD#take().

    .. note:: this method should only be used if the resulting array is expected
        to be small, as all the data is loaded into the driver's memory.

    >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
    [2, 3]
    >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
    [2, 3, 4, 5, 6]
    >>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
    [91, 92, 93]
    """
    items = []
    totalParts = self.getNumPartitions()
    partsScanned = 0

    while len(items) < num and partsScanned < totalParts:
        # The number of partitions to try in this iteration.
        # It is ok for this number to be greater than totalParts because
        # we actually cap it at totalParts in runJob.
        numPartsToTry = 1
        if partsScanned > 0:
            # If we didn't find any rows after the previous iteration,
            # quadruple and retry. Otherwise, interpolate the number of
            # partitions we need to try, but overestimate it by 50%.
            # We also cap the estimation in the end.
            if len(items) == 0:
                numPartsToTry = partsScanned * 4
            else:
                # the first parameter of max is >=1 whenever partsScanned >= 2
                numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
                numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)

        left = num - len(items)

        def takeUpToNumLeft(iterator):
            # Yield at most `left` elements from this partition.  StopIteration
            # is caught explicitly because letting it escape a generator is an
            # error under PEP 479 (Python 3.7+).
            iterator = iter(iterator)
            taken = 0
            while taken < left:
                try:
                    yield next(iterator)
                except StopIteration:
                    return
                taken += 1

        # Run the job only on the partitions not yet scanned.
        p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
        res = self.context.runJob(self, takeUpToNumLeft, p)

        items += res
        partsScanned += numPartsToTry
    # The last batch may overshoot `num`; trim to the requested length.
    return items[:num]
def first(self):
    """
    Return the first element in this RDD.

    >>> sc.parallelize([2, 3, 4]).first()
    2
    >>> sc.parallelize([]).first()
    Traceback (most recent call last):
        ...
    ValueError: RDD is empty
    """
    # take(1) only scans as many partitions as needed to find one element.
    head = self.take(1)
    if not head:
        raise ValueError("RDD is empty")
    return head[0]
def isEmpty(self):
    """
    Returns true if and only if the RDD contains no elements at all.

    .. note:: an RDD may be empty even when it has at least 1 partition.

    >>> sc.parallelize([]).isEmpty()
    True
    >>> sc.parallelize([1]).isEmpty()
    False
    """
    # Short-circuit when there are no partitions; otherwise probe for
    # a single element.
    if self.getNumPartitions() == 0:
        return True
    return len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
    converted for output using either user specified converters or, by default,
    L{org.apache.spark.api.python.JavaToWritableConverter}.

    :param conf: Hadoop job configuration, passed in as a dict
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    """
    job_conf = self.ctx._dictToJavaMap(conf)
    pickled = self._pickled()
    # The trailing True flag selects the new-API path on the JVM side
    # (contrast with saveAsHadoopDataset, which passes False).
    self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickled._jrdd, True, job_conf,
                                                keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                           keyConverter=None, valueConverter=None, conf=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
    will be inferred if not specified. Keys and values are converted for output using either
    user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
    C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
    of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

    :param path: path to Hadoop file
    :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
        (e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
    :param keyClass: fully qualified classname of key Writable class
        (e.g. "org.apache.hadoop.io.IntWritable", None by default)
    :param valueClass: fully qualified classname of value Writable class
        (e.g. "org.apache.hadoop.io.Text", None by default)
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: Hadoop job configuration, passed in as a dict (None by default)
    """
    job_conf = self.ctx._dictToJavaMap(conf)
    pickled = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickled._jrdd, True, path,
                                                   outputFormatClass,
                                                   keyClass, valueClass,
                                                   keyConverter, valueConverter, job_conf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
    converted for output using either user specified converters or, by default,
    L{org.apache.spark.api.python.JavaToWritableConverter}.

    :param conf: Hadoop job configuration, passed in as a dict
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    """
    job_conf = self.ctx._dictToJavaMap(conf)
    pickled = self._pickled()
    # The trailing False flag selects the old-API (mapred package) path.
    self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickled._jrdd, True, job_conf,
                                                keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                     keyConverter=None, valueConverter=None, conf=None,
                     compressionCodecClass=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the old Hadoop OutputFormat API (mapred package). Key and value types
    will be inferred if not specified. Keys and values are converted for output using either
    user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
    C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
    of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.

    :param path: path to Hadoop file
    :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
        (e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
    :param keyClass: fully qualified classname of key Writable class
        (e.g. "org.apache.hadoop.io.IntWritable", None by default)
    :param valueClass: fully qualified classname of value Writable class
        (e.g. "org.apache.hadoop.io.Text", None by default)
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: (None by default)
    :param compressionCodecClass: (None by default)
    """
    job_conf = self.ctx._dictToJavaMap(conf)
    pickled = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickled._jrdd, True, path,
                                             outputFormatClass,
                                             keyClass, valueClass,
                                             keyConverter, valueConverter,
                                             job_conf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
    RDD's key and value types. The mechanism is as follows:

        1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
        2. Keys and values of this Java RDD are converted to Writables and written out.

    :param path: path to sequence file
    :param compressionCodecClass: (None by default)
    """
    pickled = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickled._jrdd, True,
                                               path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
    """
    Save this RDD as a SequenceFile of serialized objects. The serializer
    used is L{pyspark.serializers.PickleSerializer}, default batch size
    is 10.

    >>> tmpFile = NamedTemporaryFile(delete=True)
    >>> tmpFile.close()
    >>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
    >>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
    ['1', '2', 'rdd', 'spark']
    """
    # batchSize == 0 means "let the serializer pick batch sizes itself".
    ser = (AutoBatchedSerializer(PickleSerializer())
           if batchSize == 0
           else BatchedSerializer(PickleSerializer(), batchSize))
    self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
    """
    Save this RDD as a text file, using string representations of elements.

    @param path: path to text file
    @param compressionCodecClass: (None by default) string i.e.
        "org.apache.hadoop.io.compress.GzipCodec"

    >>> tempFile = NamedTemporaryFile(delete=True)
    >>> tempFile.close()
    >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
    >>> from fileinput import input
    >>> from glob import glob
    >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
    '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'

    Empty lines are tolerated when saving to text files.

    >>> tempFile2 = NamedTemporaryFile(delete=True)
    >>> tempFile2.close()
    >>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
    >>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
    '\\n\\n\\nbar\\nfoo\\n'

    Using compressionCodecClass

    >>> tempFile3 = NamedTemporaryFile(delete=True)
    >>> tempFile3.close()
    >>> codec = "org.apache.hadoop.io.compress.GzipCodec"
    >>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
    >>> from fileinput import input, hook_compressed
    >>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
    >>> b''.join(result).decode('utf-8')
    u'bar\\nfoo\\n'
    """
    def func(split, iterator):
        # Normalize every element to UTF-8 bytes before handing the data to
        # the JVM writer.  NOTE(review): `unicode` presumably aliases `str`
        # on Python 3 elsewhere in this module — confirm at the file top.
        for x in iterator:
            if not isinstance(x, (unicode, bytes)):
                x = unicode(x)
            if isinstance(x, unicode):
                x = x.encode("utf-8")
            yield x
    keyed = self.mapPartitionsWithIndex(func)
    # Elements are already raw bytes; skip Python-side serialization.
    keyed._bypass_serializer = True
    if compressionCodecClass:
        compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
    else:
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
    """
    Return the key-value pairs in this RDD to the master as a dictionary.

    .. note:: this method should only be used if the resulting data is expected
        to be small, as all the data is loaded into the driver's memory.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
    >>> m[1]
    2
    >>> m[3]
    4
    """
    # Later pairs win on duplicate keys, matching dict() semantics.
    pairs = self.collect()
    return dict(pairs)
def keys(self):
    """
    Return an RDD with the keys of each tuple.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
    >>> m.collect()
    [1, 3]
    """
    def first_of(pair):
        return pair[0]

    return self.map(first_of)
def values(self):
    """
    Return an RDD with the values of each tuple.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).values()
    >>> m.collect()
    [2, 4]
    """
    def second_of(pair):
        return pair[1]

    return self.map(second_of)
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
    """
    Merge the values for each key using an associative and commutative reduce function.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.

    Output will be partitioned with C{numPartitions} partitions, or
    the default parallelism level if C{numPartitions} is not specified.
    Default partitioner is hash-partition.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKey(add).collect())
    [('a', 2), ('b', 1)]
    """
    # reduceByKey is combineByKey where a value is its own combiner and
    # the same function both merges values and merges combiners.
    return self.combineByKey(lambda value: value, func, func,
                             numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
    """
    Merge the values for each key using an associative and commutative reduce function, but
    return the results immediately to the master as a dictionary.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKeyLocally(add).items())
    [('a', 2), ('b', 1)]
    """
    # Guard against user functions that raise StopIteration (PEP 479).
    safe_func = fail_on_stopiteration(func)

    def reduce_partition(iterator):
        acc = {}
        for key, value in iterator:
            acc[key] = safe_func(acc[key], value) if key in acc else value
        yield acc

    def merge_dicts(lhs, rhs):
        for key, value in rhs.items():
            lhs[key] = safe_func(lhs[key], value) if key in lhs else value
        return lhs

    return self.mapPartitions(reduce_partition).reduce(merge_dicts)
def countByKey(self):
    """
    Count the number of elements for each key, and return the result to the
    master as a dictionary.

    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.countByKey().items())
    [('a', 2), ('b', 1)]
    """
    # Project out the keys, then reuse the generic value counter.
    return self.map(lambda pair: pair[0]).countByValue()
def join(self, other, numPartitions=None):
    """
    Return an RDD containing all pairs of elements with matching keys in
    C{self} and C{other}.

    Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
    (k, v1) is in C{self} and (k, v2) is in C{other}.

    Performs a hash join across the cluster.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2), ("a", 3)])
    >>> sorted(x.join(y).collect())
    [('a', (1, 2)), ('a', (1, 3))]
    """
    # Delegate to the shared join helper (pyspark.join).
    return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
    """
    Perform a left outer join of C{self} and C{other}.

    For each element (k, v) in C{self}, the resulting RDD will either
    contain all pairs (k, (v, w)) for w in C{other}, or the pair
    (k, (v, None)) if no elements in C{other} have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> sorted(x.leftOuterJoin(y).collect())
    [('a', (1, 2)), ('b', (4, None))]
    """
    # Delegate to the shared join helper (pyspark.join).
    return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
    """
    Perform a right outer join of C{self} and C{other}.

    For each element (k, w) in C{other}, the resulting RDD will either
    contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
    if no elements in C{self} have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> sorted(y.rightOuterJoin(x).collect())
    [('a', (2, 1)), ('b', (None, 4))]
    """
    # Delegate to the shared join helper (pyspark.join).
    return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
    """
    Perform a full outer join of C{self} and C{other}.

    For each element (k, v) in C{self}, the resulting RDD will either
    contain all pairs (k, (v, w)) for w in C{other}, or the pair
    (k, (v, None)) if no elements in C{other} have key k.

    Similarly, for each element (k, w) in C{other}, the resulting RDD will
    either contain all pairs (k, (v, w)) for v in C{self}, or the pair
    (k, (None, w)) if no elements in C{self} have key k.

    Hash-partitions the resulting RDD into the given number of partitions.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2), ("c", 8)])
    >>> sorted(x.fullOuterJoin(y).collect())
    [('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
    """
    # Fix: the docstring previously said "right outer join", but this method
    # performs a full outer join (as the helper and the doctest show).
    return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
    """
    Return a copy of the RDD partitioned using the specified partitioner.

    >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
    >>> sets = pairs.partitionBy(2).glom().collect()
    >>> len(set(sets[0]).intersection(set(sets[1])))
    0
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()
    partitioner = Partitioner(numPartitions, partitionFunc)
    # Already partitioned exactly this way — nothing to do.
    if self.partitioner == partitioner:
        return self

    # Transferring O(n) objects to Java is too expensive.
    # Instead, we'll form the hash buckets in Python,
    # transferring O(numPartitions) objects to Java.
    # Each object is a (splitNumber, [objects]) pair.
    # In order to avoid too huge objects, the objects are
    # grouped into chunks.
    outputSerializer = self.ctx._unbatched_serializer

    # Spill threshold: half the configured Python worker memory budget.
    limit = (_parse_memory(self.ctx._conf.get(
        "spark.python.worker.memory", "512m")) / 2)

    def add_shuffle_key(split, iterator):
        # Emit alternating (packed split id, serialized chunk) pairs so the
        # JVM side can route each chunk to its target partition.
        buckets = defaultdict(list)
        c, batch = 0, min(10 * numPartitions, 1000)

        for k, v in iterator:
            buckets[partitionFunc(k) % numPartitions].append((k, v))
            c += 1

            # check used memory and avg size of chunk of objects
            if (c % 1000 == 0 and get_used_memory() > limit
                    or c > batch):
                n, size = len(buckets), 0
                for split in list(buckets.keys()):
                    yield pack_long(split)
                    d = outputSerializer.dumps(buckets[split])
                    del buckets[split]
                    yield d
                    size += len(d)

                # Adapt the chunk size based on the average serialized size.
                avg = int(size / n) >> 20
                # let 1M < avg < 10M
                if avg < 1:
                    batch *= 1.5
                elif avg > 10:
                    batch = max(int(batch / 1.5), 1)
                c = 0

        # Flush whatever remains at the end of the partition.
        for split, items in buckets.items():
            yield pack_long(split)
            yield outputSerializer.dumps(items)

    keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
    # Data is already serialized bytes; skip Python-side serialization.
    keyed._bypass_serializer = True
    with SCCallSiteSync(self.context) as css:
        pairRDD = self.ctx._jvm.PairwiseRDD(
            keyed._jrdd.rdd()).asJavaPairRDD()
        jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                       id(partitionFunc))
        jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
    rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
    # Remember the partitioner so a repeated partitionBy can short-circuit.
    rdd.partitioner = partitioner
    return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                 numPartitions=None, partitionFunc=portable_hash):
    """
    Generic function to combine the elements for each key using a custom
    set of aggregation functions.

    Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
    type" C.

    Users provide three functions:

        - C{createCombiner}, which turns a V into a C (e.g., creates
          a one-element list)
        - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
          a list)
        - C{mergeCombiners}, to combine two C's into a single one (e.g., merges
          the lists)

    To avoid memory allocation, both mergeValue and mergeCombiners are allowed to
    modify and return their first argument instead of creating a new C.

    In addition, users can control the partitioning of the output RDD.

    .. note:: V and C can be different -- for example, one might group an RDD of type
        (Int, Int) into an RDD of type (Int, List[Int]).

    >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 2)])
    >>> def to_list(a):
    ...     return [a]
    ...
    >>> def append(a, b):
    ...     a.append(b)
    ...     return a
    ...
    >>> def extend(a, b):
    ...     a.extend(b)
    ...     return a
    ...
    >>> sorted(x.combineByKey(to_list, append, extend).collect())
    [('a', [1, 2]), ('b', [1])]
    """
    if numPartitions is None:
        numPartitions = self._defaultReducePartitions()
    serializer = self.ctx.serializer
    memory = self._memory_limit()
    agg = Aggregator(createCombiner, mergeValue, mergeCombiners)

    def combineLocally(iterator):
        # Map-side combine with an external (spillable) merger; 0.9 leaves
        # headroom under the configured memory limit.
        merger = ExternalMerger(agg, memory * 0.9, serializer)
        merger.mergeValues(iterator)
        return merger.items()

    locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
    shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)

    def _mergeCombiners(iterator):
        # Reduce-side merge of the per-partition combiners after the shuffle.
        merger = ExternalMerger(agg, memory, serializer)
        merger.mergeCombiners(iterator)
        return merger.items()

    return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
                   partitionFunc=portable_hash):
    """
    Aggregate the values of each key, using given combine functions and a neutral
    "zero value". This function can return a different result type, U, than the type
    of the values in this RDD, V. Thus, we need one operation for merging a V into
    a U and one operation for merging two U's, The former operation is used for merging
    values within a partition, and the latter is used for merging values between
    partitions. To avoid memory allocation, both of these functions are
    allowed to modify and return their first argument instead of creating a new U.
    """
    def fresh_zero():
        # Deep-copy so a mutable zeroValue is never shared between keys.
        return copy.deepcopy(zeroValue)

    return self.combineByKey(
        lambda v: seqFunc(fresh_zero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
    """
    Merge the values for each key using an associative function "func"
    and a neutral "zeroValue" which may be added to the result an
    arbitrary number of times, and must not change the result
    (e.g., 0 for addition, or 1 for multiplication.).

    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> from operator import add
    >>> sorted(rdd.foldByKey(0, add).collect())
    [('a', 2), ('b', 1)]
    """
    def fresh_zero():
        # Deep-copy so a mutable zeroValue is never shared between keys.
        return copy.deepcopy(zeroValue)

    return self.combineByKey(lambda v: func(fresh_zero(), v), func, func, numPartitions,
                             partitionFunc)
def _memory_limit(self):
    """Return the per-worker memory budget in MB, from spark.python.worker.memory."""
    configured = self.ctx._conf.get("spark.python.worker.memory", "512m")
    return _parse_memory(configured)
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
    """
    Group the values for each key in the RDD into a single sequence.
    Hash-partitions the resulting RDD with numPartitions partitions.

    .. note:: If you are grouping in order to perform an aggregation (such as a
        sum or average) over each key, using reduceByKey or aggregateByKey will
        provide much better performance.

    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.groupByKey().mapValues(len).collect())
    [('a', 2), ('b', 1)]
    >>> sorted(rdd.groupByKey().mapValues(list).collect())
    [('a', [1, 1]), ('b', [1])]
    """
    # Combiner functions: accumulate values for a key into a list.
    def createCombiner(x):
        return [x]

    def mergeValue(xs, x):
        xs.append(x)
        return xs

    def mergeCombiners(a, b):
        a.extend(b)
        return a

    memory = self._memory_limit()
    serializer = self._jrdd_deserializer
    agg = Aggregator(createCombiner, mergeValue, mergeCombiners)

    def combine(iterator):
        # Map-side combine with a spillable merger; 0.9 leaves headroom
        # under the configured memory limit.
        merger = ExternalMerger(agg, memory * 0.9, serializer)
        merger.mergeValues(iterator)
        return merger.items()

    locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
    shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)

    def groupByKey(it):
        # Reduce-side grouping with an external (spillable) group-by.
        merger = ExternalGroupBy(agg, memory, serializer)
        merger.mergeCombiners(it)
        return merger.items()

    # Values are exposed lazily as ResultIterable rather than plain lists.
    return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
    """
    Pass each value in the key-value pair RDD through a flatMap function
    without changing the keys; this also retains the original RDD's
    partitioning.

    >>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
    >>> def f(x): return x
    >>> x.flatMapValues(f).collect()
    [('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
    """
    def expand(pair):
        key, value = pair
        # One output pair per element produced by f, all sharing the key.
        return ((key, out) for out in f(value))

    return self.flatMap(expand, preservesPartitioning=True)
def mapValues(self, f):
    """
    Pass each value in the key-value pair RDD through a map function
    without changing the keys; this also retains the original RDD's
    partitioning.

    >>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
    >>> def f(x): return len(x)
    >>> x.mapValues(f).collect()
    [('a', 3), ('b', 1)]
    """
    def transform(pair):
        key, value = pair
        return (key, f(value))

    return self.map(transform, preservesPartitioning=True)
def groupWith(self, other, *others):
    """
    Alias for cogroup but with support for multiple RDDs.

    >>> w = sc.parallelize([("a", 5), ("b", 6)])
    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> z = sc.parallelize([("b", 42)])
    >>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
    [('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
    """
    # Cogroup self with every supplied RDD in one pass.
    rdds = (self, other) + others
    return python_cogroup(rdds, numPartitions=None)
# TODO: add variant with custom parittioner
def cogroup(self, other, numPartitions=None):
    """
    For each key k in C{self} or C{other}, return a resulting RDD that
    contains a tuple with the list of values for that key in C{self} as
    well as C{other}.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
    [('a', ([1], [2])), ('b', ([4], []))]
    """
    # Delegate to the shared cogroup helper (pyspark.join).
    return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
    """
    Return a subset of this RDD sampled by key (via stratified sampling).

    Create a sample of this RDD using variable sampling rates for
    different keys as specified by fractions, a key to sampling rate map.

    >>> fractions = {"a": 0.2, "b": 0.1}
    >>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
    >>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
    >>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
    True
    >>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
    True
    >>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
    True
    """
    # Validate all rates up front: a negative fraction is meaningless.
    for fraction in fractions.values():
        assert fraction >= 0.0, "Negative fraction value: %s" % fraction
    sampler = RDDStratifiedSampler(withReplacement, fractions, seed)
    return self.mapPartitionsWithIndex(sampler.func, True)
def subtractByKey(self, other, numPartitions=None):
    """
    Return each (key, value) pair in C{self} that has no pair with matching
    key in C{other}.

    >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
    >>> y = sc.parallelize([("a", 3), ("c", None)])
    >>> sorted(x.subtractByKey(y).collect())
    [('b', 4), ('b', 5)]
    """
    def only_in_self(pair):
        # Keep keys that have values on our side and none on the other side.
        _, (self_vals, other_vals) = pair
        return self_vals and not other_vals

    grouped = self.cogroup(other, numPartitions)
    return grouped.filter(only_in_self).flatMapValues(lambda vals: vals[0])
def subtract(self, other, numPartitions=None):
    """
    Return each value in C{self} that is not contained in C{other}.

    >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
    >>> y = sc.parallelize([("a", 3), ("c", None)])
    >>> sorted(x.subtract(y).collect())
    [('a', 1), ('b', 4), ('b', 5)]
    """
    # Lift both RDDs into (element, True) pairs so set difference can be
    # expressed as a key-wise subtraction; True is just a placeholder.
    keyed_other = other.map(lambda x: (x, True))
    keyed_self = self.map(lambda x: (x, True))
    return keyed_self.subtractByKey(keyed_other, numPartitions).keys()
def keyBy(self, f):
    """
    Creates tuples of the elements in this RDD by applying C{f}.

    >>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
    >>> y = sc.parallelize(zip(range(0,5), range(0,5)))
    >>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
    [(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
    """
    def pair_with_key(elem):
        return (f(elem), elem)

    return self.map(pair_with_key)
def repartition(self, numPartitions):
    """
    Return a new RDD that has exactly numPartitions partitions.

    Can increase or decrease the level of parallelism in this RDD.
    Internally, this uses a shuffle to redistribute data.
    If you are decreasing the number of partitions in this RDD, consider
    using `coalesce`, which can avoid performing a shuffle.

    >>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
    >>> sorted(rdd.glom().collect())
    [[1], [2, 3], [4, 5], [6, 7]]
    >>> len(rdd.repartition(2).glom().collect())
    2
    >>> len(rdd.repartition(10).glom().collect())
    10
    """
    # repartition is coalesce with shuffling forced on.
    return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
    """
    Return a new RDD that is reduced into `numPartitions` partitions.

    >>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
    [[1], [2, 3], [4, 5]]
    >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
    [[1, 2, 3, 4, 5]]
    """
    if not shuffle:
        # No shuffle: delegate directly to the JVM coalesce.
        return RDD(self._jrdd.coalesce(numPartitions, shuffle),
                   self.ctx, self._jrdd_deserializer)
    # Decrease the batch size in order to distribute evenly the elements across output
    # partitions. Otherwise, repartition will possibly produce highly skewed partitions.
    batchSize = min(10, self.ctx._batchSize or 1024)
    reserialized = self._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
    return RDD(reserialized._jrdd.coalesce(numPartitions, shuffle),
               self.ctx, reserialized._jrdd_deserializer)
def zip(self, other):
    """
    Zips this RDD with another one, returning key-value pairs with the
    first element in each RDD second element in each RDD, etc. Assumes
    that the two RDDs have the same number of partitions and the same
    number of elements in each partition (e.g. one was made through
    a map on the other).

    >>> x = sc.parallelize(range(0,5))
    >>> y = sc.parallelize(range(1000, 1005))
    >>> x.zip(y).collect()
    [(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
    """
    def get_batch_size(ser):
        if isinstance(ser, BatchedSerializer):
            return ser.batchSize
        return 1  # not batched

    def batch_as(rdd, batchSize):
        return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))

    # The JVM zips serialized batches element-wise, so both sides must use
    # identical, fixed batch sizes for the pairing to line up.
    my_batch = get_batch_size(self._jrdd_deserializer)
    other_batch = get_batch_size(other._jrdd_deserializer)
    if my_batch != other_batch or not my_batch:
        # use the smallest batchSize for both of them
        batchSize = min(my_batch, other_batch)
        if batchSize <= 0:
            # auto batched or unlimited
            batchSize = 100
        other = batch_as(other, batchSize)
        self = batch_as(self, batchSize)

    if self.getNumPartitions() != other.getNumPartitions():
        raise ValueError("Can only zip with RDD which has the same number of partitions")

    # There will be an Exception in JVM if there are different number
    # of items in each partitions.
    pairRDD = self._jrdd.zip(other._jrdd)
    deserializer = PairDeserializer(self._jrdd_deserializer,
                                    other._jrdd_deserializer)
    return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
    """
    Zips this RDD with its element indices.

    The ordering is first based on the partition index and then the
    ordering of items within each partition. So the first item in
    the first partition gets index 0, and the last item in the last
    partition receives the largest index.

    This method needs to trigger a spark job when this RDD contains
    more than one partitions.

    >>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
    [('a', 0), ('b', 1), ('c', 2), ('d', 3)]
    """
    offsets = [0]
    if self.getNumPartitions() > 1:
        # One counting pass to learn each partition's size, so every
        # partition can start numbering at its cumulative offset.
        sizes = self.mapPartitions(lambda it: [sum(1 for _ in it)]).collect()
        for size in sizes[:-1]:
            offsets.append(offsets[-1] + size)

    def attach_index(split, iterator):
        for index, item in enumerate(iterator, offsets[split]):
            yield item, index

    return self.mapPartitionsWithIndex(attach_index)
def zipWithUniqueId(self):
    """
    Zips this RDD with generated unique Long ids.

    Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
    n is the number of partitions. So there may exist gaps, but this
    method won't trigger a spark job, which is different from
    L{zipWithIndex}

    >>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
    [('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
    """
    num_partitions = self.getNumPartitions()

    def assign_ids(split, iterator):
        # id = position-in-partition * n + partition-index, unique by construction.
        for position, item in enumerate(iterator):
            yield item, position * num_partitions + split

    return self.mapPartitionsWithIndex(assign_ids)
def name(self):
    """
    Return the name of this RDD.
    """
    # Returns None when the JVM-side RDD has no (non-empty) name.
    jvm_name = self._jrdd.name()
    return jvm_name if jvm_name else None
@ignore_unicode_prefix
def setName(self, name):
    """
    Assign a name to this RDD.

    >>> rdd1 = sc.parallelize([1, 2])
    >>> rdd1.setName('RDD1').name()
    u'RDD1'
    """
    self._jrdd.setName(name)
    # Return self so calls can be chained.
    return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
    def getStorageLevel(self):
        """
        Get the RDD's current storage level.

        >>> rdd1 = sc.parallelize([1,2])
        >>> rdd1.getStorageLevel()
        StorageLevel(False, False, False, False, 1)
        >>> print(rdd1.getStorageLevel())
        Serialized 1x Replicated
        """
        # Mirror the JVM-side StorageLevel into the Python wrapper class.
        java_storage_level = self._jrdd.getStorageLevel()
        storage_level = StorageLevel(java_storage_level.useDisk(),
                                     java_storage_level.useMemory(),
                                     java_storage_level.useOffHeap(),
                                     java_storage_level.deserialized(),
                                     java_storage_level.replication())
        return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
    def lookup(self, key):
        """
        Return the list of values in the RDD for key `key`. This operation
        is done efficiently if the RDD has a known partitioner by only
        searching the partition that the key maps to.

        >>> l = range(1000)
        >>> rdd = sc.parallelize(zip(l, l), 10)
        >>> rdd.lookup(42) # slow
        [42]
        >>> sorted = rdd.sortByKey()
        >>> sorted.lookup(42) # fast
        [42]
        >>> sorted.lookup(1024)
        []
        >>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
        >>> list(rdd2.lookup(('a', 'b'))[0])
        ['c']
        """
        # Keep only the values whose key matches.
        values = self.filter(lambda kv: kv[0] == key).values()

        if self.partitioner is not None:
            # Known partitioner: run the job on just the single partition the
            # key maps to, instead of scanning the whole RDD.
            return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])

        return values.collect()
def _to_java_object_rdd(self):
""" Return a JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
    def countApprox(self, timeout, confidence=0.95):
        """
        .. note:: Experimental

        Approximate version of count() that returns a potentially incomplete
        result within a timeout, even if not all tasks have finished.

        >>> rdd = sc.parallelize(range(1000), 10)
        >>> rdd.countApprox(1000, 1.0)
        1000
        """
        # Count each partition as a float so the per-partition counts can be
        # fed to the JVM's approximate double-sum machinery via sumApprox().
        drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
        return int(drdd.sumApprox(timeout, confidence))
    def sumApprox(self, timeout, confidence=0.95):
        """
        .. note:: Experimental

        Approximate operation to return the sum within a timeout
        or meet the confidence.

        >>> rdd = sc.parallelize(range(1000), 10)
        >>> r = sum(range(1000))
        >>> abs(rdd.sumApprox(1000) - r) / r < 0.05
        True
        """
        # Bridge to a JavaDoubleRDD so the JVM's approximate sum can be used.
        jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
        jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
        r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
        # Wrap the JVM result in a BoundedFloat carrying the confidence bounds.
        return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
    def meanApprox(self, timeout, confidence=0.95):
        """
        .. note:: Experimental

        Approximate operation to return the mean within a timeout
        or meet the confidence.

        >>> rdd = sc.parallelize(range(1000), 10)
        >>> r = sum(range(1000)) / 1000.0
        >>> abs(rdd.meanApprox(1000) - r) / r < 0.05
        True
        """
        # Coerce every element to float and bridge to a JavaDoubleRDD so the
        # JVM's approximate mean can be used.
        jrdd = self.map(float)._to_java_object_rdd()
        jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
        r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
        # Wrap the JVM result in a BoundedFloat carrying the confidence bounds.
        return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
    def countApproxDistinct(self, relativeSD=0.05):
        """
        .. note:: Experimental

        Return approximate number of distinct elements in the RDD.

        The algorithm used is based on streamlib's implementation of
        `"HyperLogLog in Practice: Algorithmic Engineering of a State
        of The Art Cardinality Estimation Algorithm", available here
        <https://doi.org/10.1145/2452376.2452456>`_.

        :param relativeSD: Relative accuracy. Smaller values create
                           counters that require more space.
                           It must be greater than 0.000017.

        >>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
        >>> 900 < n < 1100
        True
        >>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
        >>> 16 < n < 24
        True
        """
        if relativeSD < 0.000017:
            raise ValueError("relativeSD should be greater than 0.000017")
        # the hash space in Java is 2^32, so mask Python's (wider) hash
        # values down to 32 bits before handing them to the JVM.
        hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
        return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
    def toLocalIterator(self):
        """
        Return an iterator that contains all of the elements in this RDD.
        The iterator will consume as much memory as the largest partition in this RDD.

        >>> rdd = sc.parallelize(range(10))
        >>> [x for x in rdd.toLocalIterator()]
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        """
        # Record the Python call site on the JVM side, then have the JVM serve
        # the partitions one at a time over a local socket.
        with SCCallSiteSync(self.context) as css:
            sock_info = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
        return _load_from_socket(sock_info, self._jrdd_deserializer)
    def barrier(self):
        """
        .. note:: Experimental

        Marks the current stage as a barrier stage, where Spark must launch all tasks together.
        In case of a task failure, instead of only restarting the failed task, Spark will abort the
        entire stage and relaunch all tasks for this stage.
        The barrier execution mode feature is experimental and it only handles limited scenarios.
        Please read the linked SPIP and design docs to understand the limitations and future plans.

        :return: an :class:`RDDBarrier` instance that provides actions within a barrier stage.

        .. seealso:: :class:`BarrierTaskContext`
        .. seealso:: `SPIP: Barrier Execution Mode
            <http://jira.apache.org/jira/browse/SPARK-24374>`_
        .. seealso:: `Design Doc <https://jira.apache.org/jira/browse/SPARK-24582>`_

        .. versionadded:: 2.4.0
        """
        # Barrier semantics are applied lazily: the wrapper's transformations
        # (e.g. RDDBarrier.mapPartitions) create the barrier-enabled RDD.
        return RDDBarrier(self)
def _is_barrier(self):
"""
Whether this RDD is in a barrier stage.
"""
return self._jrdd.rdd().isBarrier()
def _prepare_for_python_RDD(sc, command):
    """
    Serialize *command* with cloudpickle for shipping to the JVM, falling back
    to a broadcast variable for large (> 1 MiB) payloads, and collect the
    SparkContext state (broadcast vars, environment, includes) that a
    PythonRDD needs.
    """
    # the serialized command will be compressed by broadcast
    ser = CloudPickleSerializer()
    pickled_command = ser.dumps(command)
    if len(pickled_command) > (1 << 20):  # 1M
        # The broadcast will have same life cycle as created PythonRDD
        broadcast = sc.broadcast(pickled_command)
        pickled_command = ser.dumps(broadcast)
    broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
    # The pending broadcast list is consumed by this call; reset it.
    sc._pickled_broadcast_vars.clear()
    return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
    """
    Wrap a Python function plus its (de)serializers and optional profiler
    into a JVM-side ``PythonFunction`` suitable for constructing a PythonRDD.
    """
    assert deserializer, "deserializer should not be empty"
    assert serializer, "serializer should not be empty"
    command = (func, profiler, deserializer, serializer)
    pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
    return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
                                  sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class RDDBarrier(object):

    """
    .. note:: Experimental

    Wraps an RDD in a barrier stage, which forces Spark to launch tasks of this stage together.
    :class:`RDDBarrier` instances are created by :func:`RDD.barrier`.

    .. versionadded:: 2.4.0
    """

    def __init__(self, rdd):
        # Keep a reference to the wrapped RDD; barrier semantics are applied
        # when a transformation such as mapPartitions() is invoked.
        self.rdd = rdd

    def mapPartitions(self, f, preservesPartitioning=False):
        """
        .. note:: Experimental

        Returns a new RDD by applying a function to each partition of the wrapped RDD,
        where tasks are launched together in a barrier stage.
        The interface is the same as :func:`RDD.mapPartitions`.
        Please see the API doc there.

        .. versionadded:: 2.4.0
        """
        def func(s, iterator):
            # Drop the partition index; mapPartitions does not expose it.
            return f(iterator)
        return PipelinedRDD(self.rdd, func, preservesPartitioning, isFromBarrier=True)
class PipelinedRDD(RDD):

    """
    Pipelined maps:

    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]

    Pipelined reduces:
    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """

    def __init__(self, prev, func, preservesPartitioning=False, isFromBarrier=False):
        if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
            # This transformation is the first in its stage:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
            self._prev_jrdd_deserializer = prev._jrdd_deserializer
        else:
            # Fuse this function with the previous one so that both run in a
            # single Python worker pass over the same upstream JVM RDD.
            prev_func = prev.func

            def pipeline_func(split, iterator):
                return func(split, prev_func(split, iterator))
            self.func = pipeline_func
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd  # maintain the pipeline
            self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        # The JVM PythonRDD is created lazily by the _jrdd property.
        self._jrdd_val = None
        self._id = None
        self._jrdd_deserializer = self.ctx.serializer
        self._bypass_serializer = False
        self.partitioner = prev.partitioner if self.preservesPartitioning else None
        self.is_barrier = prev._is_barrier() or isFromBarrier

    def getNumPartitions(self):
        # Pipelining never changes partitioning, so delegate to the
        # upstream JVM RDD.
        return self._prev_jrdd.partitions().size()

    @property
    def _jrdd(self):
        # Lazily build (and cache) the JVM-side PythonRDD for this pipeline.
        if self._jrdd_val:
            return self._jrdd_val
        if self._bypass_serializer:
            self._jrdd_deserializer = NoOpSerializer()

        if self.ctx.profiler_collector:
            profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
        else:
            profiler = None

        wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
                                      self._jrdd_deserializer, profiler)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
                                             self.preservesPartitioning, self.is_barrier)
        self._jrdd_val = python_rdd.asJavaRDD()

        if profiler:
            self._id = self._jrdd_val.id()
            self.ctx.profiler_collector.add_profiler(self._id, profiler)
        return self._jrdd_val

    def id(self):
        # Forces creation of the JVM RDD on first access.
        if self._id is None:
            self._id = self._jrdd.id()
        return self._id

    def _is_pipelinable(self):
        # A stage can only absorb further transformations while it is
        # neither cached nor checkpointed.
        return not (self.is_cached or self.is_checkpointed)

    def _is_barrier(self):
        return self.is_barrier
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import doctest
    from pyspark.context import SparkContext
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    globs['sc'] = SparkContext('local[4]', 'PythonTest')
    (failure_count, test_count) = doctest.testmod(
        globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        # NOTE(review): relies on a module-level ``import sys`` — confirm it
        # exists at the top of this file.
        sys.exit(-1)
if __name__ == "__main__":
    # Allow running the doctest suite directly: ``python rdd.py``.
    _test()
| 38.38785 | 100 | 0.583739 |
bcca24e7e4f877ed1981607759a5322fe34eedfa | 1,204 | py | Python | src/ir_encoder/ir_command_merge.py | deaddevils/core | 286541c974f96656c3340cb1dd37df116e3dfd67 | [
"MIT"
] | 5 | 2021-01-09T17:08:05.000Z | 2022-03-29T08:00:05.000Z | src/ir_encoder/ir_command_merge.py | deaddevils/core | 286541c974f96656c3340cb1dd37df116e3dfd67 | [
"MIT"
] | null | null | null | src/ir_encoder/ir_command_merge.py | deaddevils/core | 286541c974f96656c3340cb1dd37df116e3dfd67 | [
"MIT"
] | 15 | 2020-12-07T03:50:58.000Z | 2022-03-29T07:59:32.000Z | #!/usr/bin/env python
# coding=utf-8
#
# created by strawmanbobi 2016-11-10
import sys
import os
# Walk the protocol directory (argv[1]) and remember every ``*.bin`` file,
# keyed by its base name (without extension), so the matching protocol file
# can be looked up later. The previous version relied on the walk's loop
# variables (``root``, ``i``, ``fileType``) leaking out of the loop, which
# merged whatever file happened to be visited *last* rather than the one
# matching the remote's protocol, and it also used the Python 2-only
# ``cmp()`` builtin and ``root + i`` without a path separator.
protocol_bins = {}
for root, dirs, files in os.walk(sys.argv[1]):
    for file_name in files:
        parts = file_name.split('.')
        if parts[-1] == 'bin':
            path = os.path.join(root, file_name)
            print(path)
            protocol_bins[parts[0]] = path
print("========================list===========================")

# argv[2] is the remote-control file, named "<protocol>#<remote>.bin".
remotePath = sys.argv[2]
remote_parts = remotePath.split('.')
if remote_parts[-1] == 'bin':
    protocolType = remote_parts[0].split('#')
    protocol_name = protocolType[0].split('/')[-1]
    print(protocolType[0])
    print(protocol_name)
    if protocol_name in protocol_bins:
        # Output file: argv[3]/irda_<protocol>_<remote-suffix>
        outName = remotePath.split('#')
        out_path = sys.argv[3] + "/irda_" + outName[0].split('/')[-1] + "_" + outName[1]
        binary = open(out_path, 'wb')
        prot_file = open(protocol_bins[protocol_name], "rb")
        remote_file = open(remotePath, "rb")
        # Concatenate: protocol header first, then the remote payload.
        binary.write(prot_file.read())
        binary.write(remote_file.read())
        binary.close()
        prot_file.close()
        remote_file.close()
        print(remotePath)
| 35.411765 | 110 | 0.450166 |
b78b321bce37029496e2e13761c32f870158de14 | 6,251 | py | Python | client/modules/Weather.py | overflOw11/lola | fc0e85305a07c93d4cc684543b7d5ac722df8758 | [
"MIT"
] | null | null | null | client/modules/Weather.py | overflOw11/lola | fc0e85305a07c93d4cc684543b7d5ac722df8758 | [
"MIT"
] | null | null | null | client/modules/Weather.py | overflOw11/lola | fc0e85305a07c93d4cc684543b7d5ac722df8758 | [
"MIT"
] | null | null | null | # -*- coding: utf-8-*-
import re
import datetime
import struct
import urllib
import feedparser
import requests
import bs4
import logging
from client.app_utils import getTimezone
from semantic.dates import DateService
# Keywords that trigger this module (matched against the transcribed speech).
WORDS = ["TEMPS", "MÉTÉO", "AUJOURD'HUI", "DEMAIN"]
def replaceAcronyms(text):
    """
    Replaces some commonly-used acronyms for an improved verbal weather report.
    """
    compass = {
        'N': 'nord',
        'S': 'sud',
        'E': 'est',
        'W': 'ouest',
    }

    def spell_out(abbrev):
        # "NE" -> "nord est", etc.
        return ' '.join(compass[letter] for letter in abbrev)

    for abbrev in re.findall(r'\b([NESW]+)\b', text):
        text = text.replace(abbrev, spell_out(abbrev))

    text = re.sub(u'° C', u'degrés celsius', text)
    text = re.sub(u'km\/h', u'kilomètres par heure', text)
    text = re.sub(u'hPa', u'hecto pascal', text)
    return text
def get_locations():
    """
    Scrape Weather Underground's list of international weather stations and
    yield one dict of station metadata per row of the fixed-width table.
    """
    r = requests.get('http://www.wunderground.com/about/faq/' +
                     'international_cities.asp')
    soup = bs4.BeautifulSoup(r.text)
    data = soup.find(id="inner-content").find('pre').string
    # Data Stucture:
    # 00 25 location
    # 01 1
    # 02 2 region
    # 03 1
    # 04 2 country
    # 05 2
    # 06 4 ID
    # 07 5
    # 08 7 latitude
    # 09 1
    # 10 7 logitude
    # 11 1
    # 12 5 elevation
    # 13 5 wmo_id
    s = struct.Struct("25s1s2s1s2s2s4s5s7s1s7s1s5s5s")
    # The first 3 lines of the <pre> block are headers; skip them.
    for line in data.splitlines()[3:]:
        row = s.unpack_from(line)
        info = {'name': row[0].strip(),
                'region': row[2].strip(),
                'country': row[4].strip(),
                'latitude': float(row[8].strip()),
                'logitude': float(row[10].strip()),
                'elevation': int(row[12].strip()),
                'id': row[6].strip(),
                'wmo_id': row[13].strip()}
        yield info
def get_forecast_by_name(location_name):
    """
    Fetch the Weather Underground RSS forecast entries for *location_name*,
    falling back to a WMO station-id lookup when the direct query fails.
    """
    # NOTE(review): ``urllib.quote`` is the Python 2 API; on Python 3 this
    # would be ``urllib.parse.quote`` — confirm the target interpreter.
    entries = feedparser.parse("http://french.wunderground.com/auto/rss_full/%s"
                               % urllib.quote(location_name))['entries']
    if entries:
        # We found weather data the easy way
        return entries
    else:
        # We try to get weather data via the list of stations
        for location in get_locations():
            if location['name'] == location_name:
                return get_forecast_by_wmo_id(location['wmo_id'])
def get_forecast_by_wmo_id(wmo_id):
    """Fetch the forecast RSS entries for a WMO station id."""
    return feedparser.parse("http://french.wunderground.com/auto/" +
                            "rss_full/global/stations/%s.xml"
                            % wmo_id)['entries']
def extractDate(text, now):
    """
    Map a French utterance to the day it refers to.

    Returns a dict with the capitalized weekday name and the corresponding
    datetime relative to *now*, or None when no day reference is found.
    """
    normalized = text.upper()
    days = ["LUNDI", "MARDI", "MERCREDI", "JEUDI", "VENDREDI", "SAMEDI", "DIMANCHE"]
    today_index = now.weekday()
    for index, day_name in enumerate(days):
        if re.search(day_name, normalized, re.UNICODE):
            # An explicit weekday name: compute the forward offset from today.
            if index == today_index:
                offset = 0
            elif index > today_index:
                offset = index - today_index
            else:
                offset = index + 7 - today_index
            return {'weekday': day_name.title(),
                    'date': now + datetime.timedelta(days=offset)}
        elif re.search("DEMAIN", normalized, re.UNICODE):
            tomorrow = now + datetime.timedelta(days=1)
            return {'weekday': days[tomorrow.weekday()].title(), 'date': tomorrow}
        elif re.search("AUJOURD'HUI", normalized, re.UNICODE):
            return {'weekday': days[today_index].title(), 'date': now}
def handle(text, mic, profile):
    """
    Responds to user-input, typically speech text, with a summary of
    the relevant weather for the requested date (typically, weather
    information will not be available for days beyond tomorrow).

    Arguments:
    text -- user-input, typically transcribed speech
    mic -- used to interact with the user (for both input and output)
    profile -- contains information related to the user (e.g., phone
    number)
    """
    logger = logging.getLogger(__name__)
    forecast = None
    # Prefer an explicit WMO station id over a free-text location name.
    if 'wmo_id' in profile:
        forecast = get_forecast_by_wmo_id(str(profile['wmo_id']))
    elif 'location' in profile:
        forecast = get_forecast_by_name(str(profile['location']))
    if not forecast:
        mic.say("Oups, je ne peux pas acceder à vos informations. Vérifier que vous avez bien renseigné votre localisation.")
        return
    tz = getTimezone(profile)
    now = datetime.datetime.now(tz=tz)
    extract = extractDate(text, now)
    if not extract:
        # No day reference in the utterance: default to today.
        weekday = extractDate("Aujourd'hui", now)['weekday']
        date = now
    else:
        weekday = extract['weekday']
        date = extract['date']
    # Pick the spoken keyword for the requested date.
    if date.weekday() == now.weekday():
        date_keyword = "Aujourd'hui"
    elif date.weekday() == (now.weekday() + 1) % 7:
        date_keyword = "Demain"
    else:
        date_keyword = weekday
    output = None
    # Scan the RSS entries for the one matching the requested weekday.
    for entry in forecast:
        #try:
        date_desc = entry['title'].split()[0].strip().lower()
        if date_desc == u'prévisions':
            # For global forecasts
            date_desc = entry['title'].split()[3].strip().lower()
            weather_desc = entry['summary']
            logger.debug("PREVISIONS : " + date_desc + " " + weather_desc)
        elif date_desc == u'conditions':
            # For first item of global forecasts
            logger.debug("CONDITIONS")
            continue
        else:
            # US forecasts
            weather_desc = entry['summary'].split('-')[1]
            logger.debug("OTHER : " + weather_desc)
        logger.debug("EGALITE ? " + weekday + " et " + date_desc.title())
        if weekday == date_desc.title():
            output = u"Les prévisions pour " + date_keyword + u" sont : " + weather_desc + "."
            break
        #except:
        #continue
    if output:
        # Expand compass/unit abbreviations so the TTS output sounds natural.
        output = replaceAcronyms(output)
        mic.say(output)
    else:
        mic.say(
            "Désolé, j'ai eu un problème.")
def isValid(text):
    """
    Returns True if the text is related to the weather.

    Arguments:
    text -- user-input, typically transcribed speech
    """
    keywords = (u'(météo|températures?|prévisions?|chaud|temps|froid|'
                u'veste|manteau|pluie|pleut)')
    return bool(re.search(keywords, text.lower(), re.IGNORECASE))
| 31.570707 | 129 | 0.579587 |
5cd04029a528f9573a2f96f38e3c19771f05a6b5 | 686 | py | Python | app/core/migrations/0003_ingredient.py | coredmp95/recipe-app-api | f8d7ffe2f1c58143dc0c083c9491f338a283a978 | [
"MIT"
] | null | null | null | app/core/migrations/0003_ingredient.py | coredmp95/recipe-app-api | f8d7ffe2f1c58143dc0c083c9491f338a283a978 | [
"MIT"
] | null | null | null | app/core/migrations/0003_ingredient.py | coredmp95/recipe-app-api | f8d7ffe2f1c58143dc0c083c9491f338a283a978 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.14 on 2019-11-18 15:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django; creates the Ingredient table with a cascading
    # FK to the configured user model. Edit only via new migrations.

    dependencies = [
        ('core', '0002_tag'),
    ]

    operations = [
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 28.583333 | 118 | 0.618076 |
cf43597c5a2149d0b14a44adf64925b05da33e63 | 1,023 | py | Python | task1.py | akselhm/BigDataProject | 21ea7a503ce6eaf8daeb777a55fb22badd938013 | [
"MIT"
] | 1 | 2020-07-04T14:12:09.000Z | 2020-07-04T14:12:09.000Z | task1.py | akselhm/BigDataProject | 21ea7a503ce6eaf8daeb777a55fb22badd938013 | [
"MIT"
] | null | null | null | task1.py | akselhm/BigDataProject | 21ea7a503ce6eaf8daeb777a55fb22badd938013 | [
"MIT"
] | null | null | null | from pyspark import SparkContext, SparkConf
# Configure Spark
sparkConf = SparkConf().setAppName("Yelp").setMaster("local")
sc = SparkContext(conf = sparkConf)
# Set data folder, inputs and output
folder_name = "./data/"
input_businesses = "yelp_businesses.csv.gz"
input_reviewers = "yelp_top_reviewers_with_reviews.csv.gz"
input_users = "yelp_top_users_friendship_graph.csv.gz"
output_file = "result1.tsv"
# Load into 3 separate RDDs
businessesRDD = sc.textFile(folder_name + input_businesses)
reviewersRDD = sc.textFile(folder_name + input_reviewers)
usersRDD = sc.textFile(folder_name + input_users)
# Count lines in each RDD and save to text file
businesses_number_of_records = businessesRDD.count()
reviewers_number_of_records = reviewersRDD.count()
users_number_of_records = usersRDD.count()
lines = [businesses_number_of_records, reviewers_number_of_records, users_number_of_records]
lines_rdd = sc.parallelize(lines)
lines_rdd.repartition(1).saveAsTextFile(folder_name + output_file)
| 37.888889 | 93 | 0.792766 |
015425e155b502d9b0213b025007b30b35cee48e | 17,319 | py | Python | falcon/asgi/stream.py | jssparas/falcon | c550eb39575ce46d43fa7af3d838aebd08332c6b | [
"Apache-2.0"
] | null | null | null | falcon/asgi/stream.py | jssparas/falcon | c550eb39575ce46d43fa7af3d838aebd08332c6b | [
"Apache-2.0"
] | 34 | 2021-01-27T18:02:31.000Z | 2021-01-27T18:08:37.000Z | falcon/asgi/stream.py | jssparas/falcon | c550eb39575ce46d43fa7af3d838aebd08332c6b | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 by Kurt Griffiths
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ASGI BoundedStream class."""
from falcon.errors import OperationNotAllowed
__all__ = ['BoundedStream']
class BoundedStream:
"""File-like input object for reading the body of the request, if any.
This class implements coroutine functions for asynchronous reading or
iteration, but otherwise provides an interface similar to that defined by
:class:`io.IOBase`.
If the request includes a Content-Length header, the number of bytes in the
stream will be truncated to the length specified by the header. Otherwise,
the stream will yield data until the ASGI server indicates that no more
bytes are available.
For large request bodies, the preferred method of using the stream object is
as an asynchronous iterator. In this mode, each body chunk is simply yielded
in its entirety, as it is received from the ASGI server. Because no data is
buffered by the framework, this is the most memory-efficient way of reading
the request body::
# If the request body is empty or has already be consumed, the iteration
# will immediately stop without yielding any data chunks. Otherwise, a
# series of byte # strings will be yielded until the entire request
# body has been yielded or the client disconnects.
async for data_chunk in req.stream
pass
The stream object also supports asynchronous ``read()`` and
``readall()`` methods::
# Read all of the data at once; use only when you are confident
# that the request body is small enough to not eat up all of
# your memory. For small bodies, this is the most performant
# option.
data = await req.stream.readall()
# ...or call read() without arguments
data = await req.stream.read()
# ...or read the data in chunks. You may choose to read more
# or less than 32 KiB as shown in this example. But note that
# this approach will generally be less efficient as compared
# to async iteration, resulting in more usage and
# copying of memory.
while True:
data_chunk = await req.stream.read(32 * 1024)
if not data_chunk:
break
Warning:
Apps may not use both ``read()`` and the asynchronous iterator
interface to consume the same request body; the only time that
it is safe to do so is when one or the other method is used to
completely read the entire body *before* the other method is
even attempted. Therefore, it is important to always call
:meth:`~.exhaust` or :meth:`~.close` if a body has only been
partially read and the remaining data is to be ignored.
Note:
The stream object provides a convenient abstraction over the series of
body chunks contained in any ASGI "http.request" events received by the
app. As such, some request body data may be temporarily buffered in
memory during and between calls to read from the stream. The framework
has been designed to minimize the amount of data that must be buffered
in this manner.
Args:
receive (awaitable): ASGI awaitable callable that will yield a new
request event dictionary when one is available.
Keyword Args:
first_event (dict): First ASGI event received from the client,
if one was preloaded (default ``None``).
content_length (int): Expected content length of the stream, derived
from the Content-Length header in the request (if available).
"""
__slots__ = [
'_buffer',
'_bytes_remaining',
'_closed',
'_iteration_started',
'_pos',
'_receive',
]
def __init__(self, receive, first_event=None, content_length=None):
self._closed = False
self._iteration_started = False
self._receive = receive
# NOTE(kgriffs): Outside of testing, first_event will always be set
# and we also assume a body is expected, otherwise why bother
# creating a stream object to read it? But just in case this
# object is created in other cases, use "in" here rather than
# EAFP.
if first_event and 'body' in first_event:
first_chunk = first_event['body']
else:
first_chunk = b''
if content_length is None:
self._buffer = first_chunk
# NOTE(kgriffs): If length is unknown we just set remaining bytes
# to a ridiculously high number so that we will keep reading
# until we get an event with more_body == False. We do not
# use sys.maxsize because 2**31 on 32-bit systems is not
# a large enough number (someone may have an API that accepts
# multi-GB payloads).
self._bytes_remaining = 2**63
else:
if len(first_chunk) > content_length:
self._buffer = first_chunk[:content_length]
else:
self._buffer = first_chunk
self._bytes_remaining = content_length - len(self._buffer)
self._pos = len(self._buffer)
if first_event and self._bytes_remaining:
# NOTE(kgriffs): Override if the event says there's no more data
if not ('more_body' in first_event and first_event['more_body']):
self._bytes_remaining = 0
def __aiter__(self):
# NOTE(kgriffs): This returns an async generator, but that's OK because
# it also implements the iterator protocol defined in PEP 492, albeit
# in a more efficient way than a regular async iterator.
return self._iter_content()
# -------------------------------------------------------------------------
# These methods are included to improve compatibility with Python's
# standard "file-like" IO interface.
# -------------------------------------------------------------------------
# NOTE(kgriffs): According to the Python docs, NotImplementedError is not
# meant to be used to mean "not supported"; rather, the method should
# just be left undefined; hence we do not implement readline(),
# readlines(), __iter__(), __next__(), flush(), seek(),
# truncate(), __del__().
def fileno(self):
"""Raise an instance of OSError since a file descriptor is not used."""
raise OSError('This IO object does not use a file descriptor')
def isatty(self):
"""Return ``False`` always."""
return False
def readable(self):
"""Return ``True`` always."""
return True
def seekable(self):
"""Return ``False`` always."""
return False
def writable(self):
"""Return ``False`` always."""
return False
def tell(self):
"""Return the number of bytes read from the stream so far."""
return self._pos
@property
def closed(self):
return self._closed
# -------------------------------------------------------------------------
@property
def eof(self):
return not self._buffer and self._bytes_remaining == 0
def close(self):
"""Clear any buffered data and close this stream.
Once the stream is closed, any operation on it will
raise an instance of :class:`ValueError`.
As a convenience, it is allowed to call this method more than
once; only the first call, however, will have an effect.
"""
if not self._closed:
self._buffer = b''
self._bytes_remaining = 0
self._closed = True
async def exhaust(self):
"""Consume and immediately discard any remaining data in the stream."""
if self._closed:
raise ValueError(
'This stream is closed; no further operations on it are permitted.'
)
self._buffer = b''
while self._bytes_remaining > 0:
event = await self._receive()
if event['type'] == 'http.disconnect':
self._bytes_remaining = 0
else:
try:
num_bytes = len(event['body'])
except KeyError:
# NOTE(kgriffs): The ASGI spec states that 'body' is optional.
num_bytes = 0
self._bytes_remaining -= num_bytes
self._pos += num_bytes
if not ('more_body' in event and event['more_body']):
self._bytes_remaining = 0
# Immediately dereference the data so it can be discarded ASAP
event = None
# NOTE(kgriffs): Ensure that if we read more than expected, this
# value is normalized to zero.
self._bytes_remaining = 0
async def readall(self):
"""Read and return all remaining data in the request body.
Warning:
Only use this method when you can be certain that you have
enough free memory for the entire request body, and that you
have configured your web server to limit request bodies to a
reasonable size (to guard against malicious requests).
Returns:
bytes: The request body data, or ``b''`` if the body is empty or
has already been consumed.
"""
if self._closed:
raise OperationNotAllowed(
'This stream is closed; no further operations on it are permitted.'
)
if self.eof:
return b''
if self._buffer:
next_chunk = self._buffer
self._buffer = b''
chunks = [next_chunk]
else:
chunks = []
while self._bytes_remaining > 0:
event = await self._receive()
# PERF(kgriffs): Use try..except because we normally expect the
# 'body' key to be present.
try:
next_chunk = event['body']
except KeyError:
pass
else:
next_chunk_len = len(next_chunk)
if next_chunk_len <= self._bytes_remaining:
chunks.append(next_chunk)
self._bytes_remaining -= next_chunk_len
else:
# NOTE(kgriffs): Do not read more data than we are
# expecting. This *should* never happen if the
# server enforces the content-length header, but
# it is better to be safe than sorry.
chunks.append(next_chunk[:self._bytes_remaining])
self._bytes_remaining = 0
# NOTE(kgriffs): This also handles the case of receiving
# the event: {'type': 'http.disconnect'}
if not ('more_body' in event and event['more_body']):
self._bytes_remaining = 0
data = chunks[0] if len(chunks) == 1 else b''.join(chunks)
self._pos += len(data)
return data
async def read(self, size=None):
"""Read some or all of the remaining bytes in the request body.
Warning:
A size should always be specified, unless you can be certain that
you have enough free memory for the entire request body, and that
you have configured your web server to limit request bodies to a
reasonable size (to guard against malicious requests).
Warning:
Apps may not use both ``read()`` and the asynchronous iterator
interface to consume the same request body; the only time that
it is safe to do so is when one or the other method is used to
completely read the entire body *before* the other method is
even attempted. Therefore, it is important to always call
:meth:`~.exhaust` or :meth:`~.close` if a body has only been
partially read and the remaining data is to be ignored.
Keyword Args:
size (int): The maximum number of bytes to read. The actual
amount of data that can be read will depend on how much is
available, and may be smaller than the amount requested. If the
size is -1 or not specified, all remaining data is read and
returned.
Returns:
bytes: The request body data, or ``b''`` if the body is empty or
has already been consumed.
"""
if self._closed:
raise OperationNotAllowed(
'This stream is closed; no further operations on it are permitted.'
)
if self.eof:
return b''
if size is None or size == -1:
return await self.readall()
if size <= 0:
return b''
if self._buffer:
num_bytes_available = len(self._buffer)
chunks = [self._buffer]
else:
num_bytes_available = 0
chunks = []
while self._bytes_remaining > 0 and num_bytes_available < size:
event = await self._receive()
# PERF(kgriffs): Use try..except because we normally expect the
# 'body' key to be present.
try:
next_chunk = event['body']
except KeyError:
pass
else:
next_chunk_len = len(next_chunk)
if next_chunk_len <= self._bytes_remaining:
chunks.append(next_chunk)
self._bytes_remaining -= next_chunk_len
num_bytes_available += next_chunk_len
else:
# NOTE(kgriffs): Do not read more data than we are
# expecting. This *should* never happen, but better
# safe than sorry.
chunks.append(next_chunk[:self._bytes_remaining])
self._bytes_remaining = 0
num_bytes_available += self._bytes_remaining
# NOTE(kgriffs): This also handles the case of receiving
# the event: {'type': 'http.disconnect'}
if not ('more_body' in event and event['more_body']):
self._bytes_remaining = 0
self._buffer = chunks[0] if len(chunks) == 1 else b''.join(chunks)
if num_bytes_available <= size:
data = self._buffer
self._buffer = b''
else:
data = self._buffer[:size]
self._buffer = self._buffer[size:]
self._pos += len(data)
return data
    async def _iter_content(self):
        """Async generator yielding the remaining body chunks.

        First drains any bytes buffered by a previous partial ``read()``,
        then keeps awaiting ASGI receive events until ``_bytes_remaining``
        reaches zero. Raises OperationNotAllowed if the stream is closed
        or already being iterated.
        """
        if self._closed:
            raise OperationNotAllowed(
                'This stream is closed; no further operations on it are permitted.'
            )
        if self.eof:
            # Body already fully consumed; nothing left to yield.
            return
        if self._iteration_started:
            raise OperationNotAllowed('This stream is already being iterated over.')
        self._iteration_started = True
        # Hand back anything a previous read() left buffered before
        # touching the ASGI receive channel.
        if self._buffer:
            next_chunk = self._buffer
            self._buffer = b''
            self._pos += len(next_chunk)
            yield next_chunk
        while self._bytes_remaining > 0:
            event = await self._receive()
            # PERF(kgriffs): Use try...except because we normally expect the
            #   'body' key to be present.
            try:
                next_chunk = event['body']
            except KeyError:
                pass
            else:
                # NOTE(kgriffs): No need to yield empty body chunks.
                if not next_chunk:
                    # NOTE(review): this `continue` also skips the more_body
                    #   check at the bottom of the loop; appears harmless
                    #   because a later event re-triggers it -- confirm.
                    continue
                next_chunk_len = len(next_chunk)
                if next_chunk_len <= self._bytes_remaining:
                    self._bytes_remaining -= next_chunk_len
                    self._pos += next_chunk_len
                else:
                    # NOTE(kgriffs): Do not yield more data than advertised;
                    #   truncate to the expected remaining length.
                    next_chunk = next_chunk[:self._bytes_remaining]
                    self._pos += self._bytes_remaining
                    self._bytes_remaining = 0
                yield next_chunk
            # NOTE(kgriffs): Per the ASGI spec, more_body is optional
            #   and should be considered False if not present.
            # NOTE(kgriffs): This also handles the case of receiving
            #   the event: {'type': 'http.disconnect'}
            # PERF(kgriffs): event.get() is more elegant, but uses a
            #   few more CPU cycles.
            if not ('more_body' in event and event['more_body']):
                self._bytes_remaining = 0
| 37.487013 | 84 | 0.583175 |
c3f5cca54c4067bcecfc2aada04da1af4267a2f8 | 174 | py | Python | Python OOP Retake Exam - 19 Dec 2020/problem1/supply/water_supply.py | DiyanKalaydzhiev23/OOP---Python | 7ac424d5fb08a6bd28dc36593e45d949b3ac0cd0 | [
"MIT"
] | null | null | null | Python OOP Retake Exam - 19 Dec 2020/problem1/supply/water_supply.py | DiyanKalaydzhiev23/OOP---Python | 7ac424d5fb08a6bd28dc36593e45d949b3ac0cd0 | [
"MIT"
] | null | null | null | Python OOP Retake Exam - 19 Dec 2020/problem1/supply/water_supply.py | DiyanKalaydzhiev23/OOP---Python | 7ac424d5fb08a6bd28dc36593e45d949b3ac0cd0 | [
"MIT"
] | null | null | null | from problem1.supply.supply import Supply
class WaterSupply(Supply):
    """Concrete Supply whose base class is initialised with a fixed amount.

    Note: in this copy the final line carried fused dataset-residue tokens
    (``| 21.75 | ...``) which made the class a syntax error; they have been
    removed. Behavior is otherwise unchanged.
    """

    def __init__(self):
        # Amount forwarded to the Supply base-class constructor.
        self.needs_increase = 40
        super().__init__(self.needs_increase)
7ee3906514caf03f988d10e7d6d62b798244fe76 | 4,952 | py | Python | infoblox/_internal/srv.py | dylanfmarquis/python-infoblox | 046b2b6b5a3974f86a22de934560254618f652c0 | [
"MIT"
] | 5 | 2016-08-23T20:34:21.000Z | 2022-03-22T01:52:08.000Z | infoblox/_internal/srv.py | dylanfmarquis/python-infoblox | 046b2b6b5a3974f86a22de934560254618f652c0 | [
"MIT"
] | 9 | 2016-08-24T20:29:47.000Z | 2019-03-01T09:13:08.000Z | infoblox/_internal/srv.py | dylanfmarquis/python-infoblox | 046b2b6b5a3974f86a22de934560254618f652c0 | [
"MIT"
] | null | null | null | """
A wrapper around record:srv objects. This allows for the modification of DNS
SRV objects
WAPI documentation can be found here:
https://ipam.illinois.edu/wapidoc/objects/record.srv.html
"""
import json
class _srv(object):
    """Wrapper around Infoblox ``record:srv`` objects (DNS SRV records).

    All requests are delegated to the parent ``infoblox_`` client, which
    must expose ``get``/``post``/``put``/``delete`` and may optionally
    expose an ``__caller__(message, status_code)`` error callback.
    """

    def __init__(self, infoblox_, name, port):
        """
        class constructor - Automatically called on class instantiation
        input infoblox_ (object) Parent class object
              name      (string) DNS name of the SRV record
              port      (int)    Service port of the SRV record
        output void (void)
        """
        self.infoblox_ = infoblox_
        self.name = name
        self.port = port
        self._ref_ = self._ref()

    def _ref(self):
        """
        _ref - Get _ref for a specified SRV record
        input void (void)
        output _ref (string) _ref ID for the SRV record, or None
        """
        try:
            return self.fetch()['_ref']
        except Exception:
            # fetch() returned None, an int status code, or raised:
            # treat all of those as "no record".
            return None

    def fetch(self, **return_fields):
        """
        fetch - Retrieve all information from a specified SRV record
        input return_fields (dict) Key value pairs of data to be returned
        output resp (parsed json) Parsed JSON record, None when no record
               matches, or the HTTP status code on error
        """
        # Only truthy keyword arguments are forwarded as _return_fields.
        return_query = ','.join(k for k in return_fields.keys()
                                if return_fields[k])
        query = "record:srv?name~={0}".format(self.name)
        if return_query:
            query += '&_return_fields=' + return_query
        resp = self.infoblox_.get(query)
        if resp.status_code != 200:
            try:
                return self.infoblox_.__caller__(
                    'Could not retrieve SRV _ref for {0} - Status {1}'
                    .format(self.name, resp.status_code), resp.status_code)
            except Exception:
                # No __caller__ hook configured; fall back to the code.
                return resp.status_code
        try:
            return json.loads(resp.text)[0]
        except (ValueError, IndexError):
            # Empty or unparsable body: no matching record.
            return None

    def add(self, target, weight=0, priority=0):
        """
        add - add target to srv record
        input target   (string) DNS target for srv record
              weight   (int)    SRV weight (default 0)
              priority (int)    SRV priority (default 0)
        output 0 (int) Target successfully added, else error code/result
        """
        payload = '{{"target": "{0}", "weight": {1}, "name": "{2}", "priority": {3}'\
                  ',"port": {4}}}'\
                  .format(target, weight, self.name, priority, self.port)
        resp = self.infoblox_.post('record:srv', payload)
        if resp.status_code != 201:
            try:
                return self.infoblox_.__caller__('Error creating srv record '
                                                 '{0} - Status: {1}'
                                                 .format(self.name,
                                                         resp.status_code),
                                                 resp.status_code)
            except Exception:
                return resp.status_code
        return 0

    def delete(self):
        """
        delete - Delete a SRV record within Infoblox
        input void (void)
        output 0 (int) Successful deletion, else error code/result
        """
        resp = self.infoblox_.delete(self._ref_)
        if resp.status_code != 200:
            try:
                # BUG FIX: the message previously used three placeholders
                # ({0} for {1} - Status: {2}) with only two arguments, so
                # .format() raised IndexError and the __caller__ hook was
                # silently skipped by the except clause below.
                return self.infoblox_.__caller__('Error deleting SRV record '
                                                 '{0} - Status: {1}'
                                                 .format(self.name,
                                                         resp.status_code),
                                                 resp.status_code)
            except Exception:
                return resp.status_code
        return 0

    def update(self, target=None, weight=None, priority=None):
        """
        update - modify an existing srv record
        input target   (string) new DNS target (None keeps current value)
              weight   (int)    new weight (None keeps current value)
              priority (int)    new priority (None keeps current value)
        output 0 (int) Update successful, else error code/result
        """
        rec = self.fetch()
        # Fall back to the record's current values for anything not given.
        # NOTE(review): if fetch() failed, `rec` is None/int and the
        # subscriptions below raise -- preexisting behavior, unchanged.
        if target is None:
            target = rec['target']
        if weight is None:
            weight = rec['weight']
        if priority is None:
            priority = rec['priority']
        payload = '{{"target": "{0}", "weight": {1}, "name": "{2}", "priority": {3}'\
                  ',"port": {4}}}'\
                  .format(target, weight, self.name, priority, self.port)
        resp = self.infoblox_.put(self._ref_, payload)
        if resp.status_code != 200:
            try:
                return self.infoblox_.__caller__('Error updating srv record '
                                                 '{0} - Status: {1}'
                                                 .format(self.name,
                                                         resp.status_code),
                                                 resp.status_code)
            except Exception:
                return resp.status_code
        return 0
| 37.233083 | 85 | 0.476373 |
e0e7984b4a667556824f4bcd798c3c0c473a28dd | 785 | py | Python | Desafios/Mundo 3/ex079.py | lucasllimati/CursoEmVideo | 01d75626baf8b82f141d62f681e55a9bda0099fd | [
"MIT"
] | null | null | null | Desafios/Mundo 3/ex079.py | lucasllimati/CursoEmVideo | 01d75626baf8b82f141d62f681e55a9bda0099fd | [
"MIT"
] | null | null | null | Desafios/Mundo 3/ex079.py | lucasllimati/CursoEmVideo | 01d75626baf8b82f141d62f681e55a9bda0099fd | [
"MIT"
] | null | null | null | # 79
# Challenge 79: read integers from the user into a list, refusing duplicates.
# When the user stops, print all unique values in ascending order.
values = []
while True:
    number = int(input('Digite um valor: '))
    if number in values:
        print('\33[31mValor duplicado! Não será adicionado.\33[m', end='')
    else:
        values.append(number)
        print('\33[32mValor adicionado com sucesso!\33[m', end='')
    # Keep asking until the user answers S (yes) or N (no).
    answer = ' '
    while answer not in 'SN':
        answer = str(input('\nQuer continuar? [S/N] ')).upper().strip()[0]
    print('¨'*30)
    if answer == 'N':
        break
print()
print('-='*30)
values.sort()
print(f'Você digitou os valores \33[36m{values}\33[m')
| 34.130435 | 240 | 0.631847 |
7ad67a3e47c82bf9e408c8b91daaff70f4e3e284 | 7,093 | py | Python | arty_a7/extra1/workshop_extra1.py | fjullien/migen_litex_tutorials | ce128a94fc3ddae534c39430f10fb4caa72d1cab | [
"MIT"
] | null | null | null | arty_a7/extra1/workshop_extra1.py | fjullien/migen_litex_tutorials | ce128a94fc3ddae534c39430f10fb4caa72d1cab | [
"MIT"
] | null | null | null | arty_a7/extra1/workshop_extra1.py | fjullien/migen_litex_tutorials | ce128a94fc3ddae534c39430f10fb4caa72d1cab | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from migen import *
from migen.genlib.cdc import *
from litex.soc.interconnect import stream
from litex.build.generic_platform import *
from litex_boards.platforms import arty
from litex.soc.cores.clock import *
# We want to continuously stream the memory content (looping over its address).
# The memory content is initialized with default values and there is a writer port
# to allow its modification.
# The writer port operates in the "sys" clock domain (100MHz) and the streaming is
# clocked by the "ser" clock domain at 150MHz.
# 100MHz │ 150MHz
# │
# ┌────────────┐ ┌───────────────┐ ┌────────┴─────────┐
# │ Dual port │ │ readport to │ │ │
# writer ───►│ ├────►│ ├───►│ stream.AsyncFIFO ├────► output
# │ memory │ │ stream │ │ │
# └────────────┘ └───────────────┘ └────────┬─────────┘
# │
# │
# This is the layout of the writer port
writer_layout = [
    ("address", 12),  # write address (12-bit)
    ("data", 32),     # 32-bit word to store
    ("valid", 1)      # write strobe
]
# This is the layout of the output stream
stream_layout = [
    ("address", 4),   # read address echoed on the stream (4-bit)
    ("data", 32)      # 32-bit word streamed out
]
# These are the initialization data
# NOTE(review): looks like interleaved values for the memory's initial
# contents -- confirm against the Memory instantiation in the exercise.
init_data = [
    0x1, 0x11223344,
    0x0, 0x66998855,
    0x1, 0x00000000,
    0x4, 0x00000044,
    0x8, 0x00000000,
    0x5, 0x0000000A,
    0x1, 0xFF000000
]
#------------------------------------------------
#-
#- Clock and reset
#-
#------------------------------------------------
class CRG(Module):
    """Clock/reset generator.

    Derives the "sys" (``sys_clk_freq``) and "ser" (150 MHz) clock domains
    from the board's 100 MHz oscillator through an S7PLL, held in reset
    while the (active-low) CPU reset button is pressed.
    """
    def __init__(self, platform, sys_clk_freq=100e6):
        self.clock_domains.cd_sys = ClockDomain()
        self.clock_domains.cd_ser = ClockDomain()
        # # #
        clk = platform.request("clk100")
        rst_n = platform.request("cpu_reset")
        self.submodules.pll = pll = S7PLL()
        # Button is active-low, PLL reset is active-high.
        self.comb += pll.reset.eq(~rst_n)
        pll.register_clkin(clk, 100e6)
        pll.create_clkout(self.cd_sys, sys_clk_freq)
        pll.create_clkout(self.cd_ser, 150e6)
        platform.add_period_constraint(clk, 1e9/100e6)
        # The two domains are asynchronous to each other: exclude
        # cross-domain paths from timing analysis.
        platform.add_false_path_constraints(self.cd_sys.clk, self.cd_ser.clk)
#------------------------------------------------
#-
#- The module
#-
#------------------------------------------------
class WorkshopMem(Module):
    """Workshop exercise skeleton -- intentionally incomplete.

    The commented TODOs below describe what attendees must implement:
    a dual-port memory fed by ``writer``, a stream.AsyncFIFO towards
    ``source``, and the two-state FSM that streams memory words into
    the FIFO. Do not "fix" the empty sections; they are the exercise.
    """
    def __init__(self, init_data):
        self.source = source = stream.Endpoint(stream_layout)
        self.writer = writer = Record(writer_layout)
        ###
        #------------------------------------------------
        #- Dual port memory
        #------------------------------------------------
        # Instantiate a memory with 32 bits words
        # Set the initialization data to init_data
        # Connect writer port to the memory
        #------------------------------------------------
        #- Asynchronous stream FIFO
        #------------------------------------------------
        # Add a stream.AsyncFIFO and connect its output to the output stream of
        # this module
        #------------------------------------------------
        #- Memory read port to stream
        #------------------------------------------------
        # This will be the read address (must be incremented after each read
        # and set to zero once the last address is reached)
        read_addr = Signal(32)
        fsm = FSM(reset_state="GET_ADDR")
        self.submodules += fsm
        fsm.act("GET_ADDR",
            # Indicates the fifo that data are not valid
            # Put the data from the memory to the fifo port
            # Increment the address
            NextState("GET_VALUE")
        )
        fsm.act("GET_VALUE",
            # Put the data from the memory to the fifo port
            # Increment or reset the address
            # Tell the fifo that the data are valid
        )
#------------------------------------------------
#-
#- Use the module on the Arty platform
#-
#------------------------------------------------
class TestMemory(Module):
    """Top-level Arty design exercising WorkshopMem on real hardware.

    Buttons select the write address, switches provide the data pattern,
    and the LEDs show an XOR-reduction of the output stream (presumably
    so the whole datapath survives optimisation -- confirm).
    """
    def __init__(self, platform, init_data):
        # Get pin from resources
        leds = platform.request_all("user_led")
        btn = platform.request_all("user_btn")
        btn_sync = Signal(len(btn))
        # MultiReg synchronizers bring the asynchronous button inputs
        # into the sys clock domain.
        for i in range(len(btn)):
            self.specials += MultiReg(btn[i], btn_sync[i])
        sw = platform.request_all("user_sw")
        sw_sync = Signal(len(sw))
        for i in range(len(sw)):
            self.specials += MultiReg(sw[i], sw_sync[i])
        self.submodules.crg = CRG(platform)
        cnt = Signal(32)
        memstream = WorkshopMem(init_data)
        # Memory writer runs in "sys" (100 MHz); streaming side in "ser" (150 MHz).
        self.submodules += ClockDomainsRenamer({"write": "sys", "read": "ser"})(memstream)
        self.sync += cnt.eq(cnt + 1)
        self.comb += [
            # Pulse the write strobe every other sys cycle.
            memstream.writer.valid.eq(cnt[0]),
            memstream.writer.address.eq(btn_sync),
            memstream.writer.data.eq(Replicate(sw_sync, 8)),
            memstream.source.ready.eq(sw_sync[0]),
            # NOTE(review): the [24:28]/[28:32] terms appear twice below and
            # therefore XOR out to zero -- likely a copy-paste slip; confirm.
            leds.eq(memstream.source.data[0:4] ^ memstream.source.data[4:8] ^
                    memstream.source.data[8:12] ^ memstream.source.data[12:16] ^
                    memstream.source.data[16:20] ^ memstream.source.data[20:24] ^
                    memstream.source.data[24:28] ^ memstream.source.data[28:32] ^
                    memstream.source.data[24:28] ^ memstream.source.data[28:32] ^
                    memstream.source.address ^ memstream.source.valid
            )
        ]
#------------------------------------------------
#-
#- Testbench
#-
#------------------------------------------------
def write_ram(dut, addr, value):
    """Simulation helper: write ``value`` at ``addr`` via the DUT writer port.

    Drives address/data, asserts ``valid`` for exactly one clock cycle and
    deasserts it again. Intended to be invoked with ``yield from`` inside
    a migen testbench generator.
    """
    yield dut.writer.address.eq(addr)
    yield dut.writer.data.eq(value)
    yield dut.writer.valid.eq(1)
    yield
    # Deassert the strobe so only a single word is written.
    yield dut.writer.valid.eq(0)
    yield
def test(dut):
    """Testbench generator: run the DUT for 500 cycles.

    The sink connected to the FIFO source refuses data during cycles
    201-299, and one memory word (address 11) is overwritten in the
    middle of that back-pressure window.
    """
    for cycle in range(500):
        sink_ready = 0 if 200 < cycle < 300 else 1
        yield dut.source.ready.eq(sink_ready)
        if cycle == 280:
            yield from write_ram(dut, 11, 0xAABBCCDD)
        yield
#------------------------------------------------
#-
#- Build / Sim
#-
#------------------------------------------------
def main():
    """Run the simulation when "sim" is on the command line, otherwise
    build the Arty bitstream into ./gateware.
    """
    # `sys` is never imported explicitly at the top of this file; import it
    # locally instead of relying on it leaking from the star imports above.
    import sys

    if "sim" in sys.argv[1:]:
        # Simulate with distinct write (10 MHz) / read (100 MHz) clocks.
        dut = ClockDomainsRenamer({"write": "sys", "read": "sclk"})(WorkshopMem(init_data))
        run_simulation(dut, test(dut), clocks={"sys": 1e9/10e6, "sclk": 1e9/100e6}, vcd_name="sim.vcd")
        exit()
    build_dir = "gateware"
    platform = arty.Platform(variant="a7-35", toolchain="vivado")
    design = TestMemory(platform, init_data)
    platform.build(design, build_dir=build_dir)


if __name__ == "__main__":
    main()
| 32.240909 | 103 | 0.489215 |
33d1a54e4d6eb234ce0486854de3a5410970574f | 3,375 | py | Python | day_12/run.py | aghontpi/AdventOfCode2020 | 43ee0415f8be0f8bf1abd4f746a9153ccc2e7406 | [
"MIT"
] | null | null | null | day_12/run.py | aghontpi/AdventOfCode2020 | 43ee0415f8be0f8bf1abd4f746a9153ccc2e7406 | [
"MIT"
] | null | null | null | day_12/run.py | aghontpi/AdventOfCode2020 | 43ee0415f8be0f8bf1abd4f746a9153ccc2e7406 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
# Action N means to move north by the given value.
# Action S means to move south by the given value.
# Action E means to move east by the given value.
# Action W means to move west by the given value.
# Action L means to turn left the given number of degrees.
# Action R means to turn right the given number of degrees.
# Action F means to move forward by the given value in the direction the ship is currently facing.
# constrainsts
# ship starts at facing east
# F10 would move the ship 10 units east (because the ship starts by facing east) to east 10, north 0.
# N3 would move the ship 3 units north to east 10, north 3.
# F7 would move the ship another 7 units east (because the ship is still facing east) to east 17, north 3.
# R90 would cause the ship to turn right by 90 degrees and face south; it remains at east 17, north 3.
# F11 would move the ship 11 units south to east 17, south 8.
def partOne(arr):
    """Advent of Code 2020 day 12, part one.

    The ship starts at the origin facing east. N/S/E/W translate it,
    L/R rotate its heading in 90-degree steps, and F moves it forward
    along the current heading.

    Prints ``east north manhattan`` (same output as before) and now also
    returns the Manhattan distance so callers and tests can use it.
    Removed the unused waypoint_x/waypoint_y locals.
    """
    x = 0  # east(+) / west(-)
    y = 0  # north(+) / south(-)
    # Movement sign for each heading index: 0=N, 1=E, 2=S, 3=W.
    signs = [1, 1, -1, -1]
    facing = 1  # start facing east
    for item in arr:
        operation = item[0:1]
        value = int(item[1:])
        if operation == 'S':
            y -= value
        elif operation == 'N':
            y += value
        elif operation == 'E':
            x += value
        elif operation == 'W':
            x -= value
        elif operation == 'R':
            # Clockwise quarter-turns around the N/E/S/W ring.
            facing = (facing + value // 90) % 4
        elif operation == 'L':
            facing = (facing - value // 90) % 4
        elif operation == 'F':
            if facing in (1, 3):
                # East or west: move along x.
                x += signs[facing] * value
            else:
                # North or south: move along y.
                y += signs[facing] * value
        else:
            assert False
    print(x, y, abs(x) + abs(y))
    return abs(x) + abs(y)
def partTwo(arr):
    """Advent of Code 2020 day 12, part two.

    N/S/E/W move the waypoint, L/R rotate it around the ship in 90-degree
    steps, and F moves the ship towards the waypoint ``value`` times.

    The original version swapped its x/y accumulators (x collected the
    north component, y the east component); that was harmless because the
    answer is a Manhattan distance, but confusing. This version uses
    clearly named axes, prints the same triple in the same order as the
    original, and returns the Manhattan distance.
    """
    ship_east = 0
    ship_north = 0
    waypoint_east = 10
    waypoint_north = 1
    for item in arr:
        operation = item[0:1]
        value = int(item[1:])
        if operation == 'S':
            waypoint_north -= value
        elif operation == 'N':
            waypoint_north += value
        elif operation == 'E':
            waypoint_east += value
        elif operation == 'W':
            waypoint_east -= value
        elif operation == 'R':
            # Each clockwise quarter-turn maps (E, N) -> (N, -E).
            for _ in range(value // 90):
                waypoint_east, waypoint_north = waypoint_north, -waypoint_east
        elif operation == 'L':
            # Each counter-clockwise quarter-turn maps (E, N) -> (-N, E).
            for _ in range(value // 90):
                waypoint_east, waypoint_north = -waypoint_north, waypoint_east
        elif operation == 'F':
            ship_east += value * waypoint_east
            ship_north += value * waypoint_north
        else:
            assert False
    # Preserve the original stdout format: its (swapped) accumulators were
    # printed as north-total first, then east-total, then the distance.
    print(ship_north, ship_east, abs(ship_east) + abs(ship_north))
    return abs(ship_east) + abs(ship_north)
# Module-level driver: reads the puzzle input and runs both parts on import.
# NOTE(review): the file handle from open() is never closed explicitly --
# fine for a throwaway script, but a `with` block would be cleaner.
inputs = [x.strip() for x in open('input.txt', 'r')]
partOne(inputs)
partTwo(inputs)
| 35.15625 | 106 | 0.577778 |
bf9600915ee5eddf3f02a35636850a2c983ea259 | 1,471 | py | Python | source/commons/message.py | julio9246/hg-poker-api | 56805601bc26bf8bb80e05235ae22a59a174af09 | [
"Apache-2.0"
] | null | null | null | source/commons/message.py | julio9246/hg-poker-api | 56805601bc26bf8bb80e05235ae22a59a174af09 | [
"Apache-2.0"
] | null | null | null | source/commons/message.py | julio9246/hg-poker-api | 56805601bc26bf8bb80e05235ae22a59a174af09 | [
"Apache-2.0"
] | null | null | null | BAD_REQUEST_VALIDATION = 'Erro de validação.'
# User-facing API messages (Portuguese). The string values are part of the
# API contract and must not be translated or reworded.
# Fix: the last line carried fused dataset-residue tokens ("| 34.209302 ...")
# that made this module a syntax error in this copy; they have been removed.

# Validation
VALIDATION_ERROR = 'Erro de validação.'
VALIDATION_DATE_ERROR = 'Data inicial posterior a Data Final.'
# Register
REGISTER_ALREADY_EXIST = 'Registro já existente.'
REGISTER_NO_CONTENT = 'Nenhum Registro encontrado.'
REGISTER_NOT_FOUND = 'Registro não encontrado.'
REGISTER_IN_USE = 'Registro está vinculado em outra tabela.'
# Tournament
TOURNAMENT_CONFLICT = 'Tournament já existente.'
TOURNAMENT_NO_CONTENT = 'Nenhum Tournament encontrado.'
TOURNAMENT_NOT_FOUND = 'Tournament não encontrado.'
# Player
PLAYER_CONFLICT = 'Player Process já existente.'
PLAYER_NO_CONTENT = 'Nenhum Player Process encontrado.'
PLAYER_NOT_FOUND = 'Player Process não encontrado.'
# Player Game
PLAYER_GAME_CONFLICT = 'Player Game já existente.'
PLAYER_GAME_NO_CONTENT = 'Nenhum Player Game Process encontrado.'
PLAYER_GAME_NOT_FOUND = 'Player Game não encontrado.'
# Player Tournament
PLAYER_TOURNAMENT_CONFLICT = 'Player Tournament já existente.'
PLAYER_TOURNAMENT_NO_CONTENT = 'Nenhum Player Tournament Process encontrado.'
PLAYER_TOURNAMENT_NOT_FOUND = 'Player Tournament não encontrado.'
# Rebuy Game
REBUY_GAME_CONFLICT = 'Rebuy já existente.'
REBUY_GAME_NO_CONTENT = 'Nenhuma Rebuy encontrado.'
REBUY_GAME_NOT_FOUND = 'Rebuy Game não encontrado.'
# Game
GAME_CONFLICT = 'Game já existente.'
GAME_NO_CONTENT = 'Nenhum Game encontrado.'
GAME_NOT_FOUND = 'Game não encontrado.'
# Auth
TOKEN_NOT_FOUND = 'Token não encontrado.'
32c9518a1841570e0225b233fcac03dd5b8f33c5 | 5,982 | py | Python | tensorflow_datasets/core/community/register_package_test.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/core/community/register_package_test.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/core/community/register_package_test.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.community.register_path."""
import contextlib
import datetime
import json
import os
import pathlib
import sys
import tempfile
import textwrap
from typing import Iterator
from unittest import mock
from etils import epath
import pytest
from tensorflow_datasets.core import dataset_builder
from tensorflow_datasets.core import registered
from tensorflow_datasets.core import utils
from tensorflow_datasets.core.community import cache
from tensorflow_datasets.core.community import dataset_sources
from tensorflow_datasets.core.community import register_package
@contextlib.contextmanager
def mock_cache_path(new_cache_dir: epath.PathLike) -> Iterator[None]:
  """Context manager redirecting the TFDS cache to `new_cache_dir`."""
  target_dir = epath.Path(new_cache_dir)
  # `__wrapped__` exposes the undecorated functions hidden behind
  # `functools.lru_cache`, so each patched copy starts with an empty cache.
  patched_cache_path = utils.memoize()(cache.cache_path.__wrapped__)
  patched_module_path = utils.memoize()(cache.module_path.__wrapped__)
  with mock.patch.object(cache, '_default_cache_dir', return_value=target_dir), \
      mock.patch.object(cache, 'cache_path', patched_cache_path), \
      mock.patch.object(cache, 'module_path', patched_module_path):
    yield
@pytest.fixture(scope='module')
def dummy_register():
  """PackageRegister backed by a temporary community-datasets index.

  The index declares three datasets: two single-file packages under the
  `kaggle` namespace and one multi-file package (with checksums) under
  `mlds`. The TFDS cache is also redirected to the temp directory.
  """
  with tempfile.TemporaryDirectory() as tmp_path:
    tmp_path = pathlib.Path(tmp_path)
    source_path = utils.tfds_path() / 'testing/dummy_dataset/dummy_dataset.py'
    # Single-file dataset package (without checksums)
    src_single = dataset_sources.DatasetSource.from_json(os.fspath(source_path))
    # Multi-file dataset package (with checksums)
    src_multi = dataset_sources.DatasetSource.from_json({
        'root_path': os.fspath(source_path.parent),
        'filenames': ['checksums.tsv', 'dummy_dataset.py'],
    })
    src_multi_json = json.dumps(src_multi.to_json())  # `dict` -> `str`
    # Create the remote index content
    # Note the absence of `"` for the `src_multi_json` as it is parsed as `dict`
    content = textwrap.dedent(f"""\
        {{"name": "kaggle:dummy_dataset", "source": "{src_single.to_json()}"}}
        {{"name": "kaggle:ds1", "source": "{src_single.to_json()}"}}
        {{"name": "mlds:dummy_dataset", "source": {src_multi_json}}}
        """)
    dummy_path = tmp_path / 'dummy-community-datasets.toml'
    dummy_path.write_text(content)
    with mock_cache_path(tmp_path / 'cache'):
      yield register_package.PackageRegister(path=dummy_path)
def test_builder_cls(dummy_register):  # pylint: disable=redefined-outer-name
  """builder_cls() installs, caches and re-uses community dataset code."""
  # The dataset will be installed in the cache
  installed_path = cache.cache_path()
  installed_path /= 'modules/tfds_community/kaggle/dummy_dataset'
  assert not installed_path.exists()
  ds_name = utils.DatasetName('kaggle:dummy_dataset')
  builder_cls = dummy_register.builder_cls(ds_name)
  assert builder_cls.name == 'dummy_dataset'
  # Hash of the installed package: the on-disk location is deterministic.
  clshash = 'e58f413affd65c267bae7acbd27fd5ac673d3e3ae13c316ffc2a461d00c8ab56'
  assert installed_path / f'{clshash}/dummy_dataset.py' == builder_cls.code_path
  assert 'kaggle' in builder_cls.code_path.parts
  assert issubclass(builder_cls, dataset_builder.DatasetBuilder)
  assert not builder_cls.url_infos  # No checksums installed with the package
  # Dataset installed in the cache
  # Filename should be deterministic
  assert list(sorted(installed_path.iterdir())) == [installed_path / clshash]
  # Reusing the dataset should re-use the cache (any new download attempt
  # would trigger the ValueError below).
  with mock.patch.object(
      register_package,
      '_download_and_cache',
      side_effect=ValueError('Dataset should have been cached already')):
    ds_name = utils.DatasetName('kaggle:dummy_dataset')
    builder_cls2 = dummy_register.builder_cls(ds_name)
    assert builder_cls is builder_cls2
  # Datasets from different namespace can have the same name
  ds_name = utils.DatasetName('mlds:dummy_dataset')
  builder_cls = dummy_register.builder_cls(ds_name)
  assert 'mlds' in builder_cls.code_path.parts
  assert issubclass(builder_cls, dataset_builder.DatasetBuilder)
  # Checksums have been correctly installed
  assert 'http://dummy.org/data.txt' in builder_cls.url_infos
  with pytest.raises(registered.DatasetNotFoundError):
    dummy_register.builder(utils.DatasetName('other:ds0'))
def test_register_path_list_builders(dummy_register):  # pylint: disable=redefined-outer-name
  """list_builders() returns every declared dataset, sorted by name."""
  expected = [
      'kaggle:ds1',
      'kaggle:dummy_dataset',
      'mlds:dummy_dataset',
  ]
  assert dummy_register.list_builders() == expected
def test_dataset_package():
  """Exports/imports operation should be identity."""
  source = dataset_sources.DatasetSource.from_json(
      'github://<owner>/<name>/tree/<branch>/my_ds/ds.py',)
  package = register_package.DatasetPackage(
      name=utils.DatasetName('ns:ds'),
      source=source,
  )
  # Round-trip through JSON must be lossless.
  assert register_package.DatasetPackage.from_json(package.to_json()) == package
  installed = register_package._InstalledPackage(
      package=package,
      instalation_date=datetime.datetime.now(),
      hash='asdajhdadsadsad',
  )
  assert register_package._InstalledPackage.from_json(installed.to_json()) == installed
def test_mock_cache_path(tmp_path: pathlib.Path):
  """While patched, cache paths point inside tmp_path and only the modules
  directory is added to sys.path."""
  with mock_cache_path(tmp_path):
    modules_dir = tmp_path / 'modules'
    assert os.fspath(tmp_path) not in sys.path
    assert cache.cache_path() == tmp_path
    assert cache.module_path() == modules_dir
    assert os.fspath(modules_dir) in sys.path
| 37.15528 | 93 | 0.752424 |
09aef82f1134119b07e990a00bb3ba631f764048 | 4,855 | py | Python | pkg/Python27/Lib/site-packages/envoy/core.py | jkolokotronis/ds_mod_tools | d9fd4def34f6adfd0e2b176d0a9bf2a3dfd43f93 | [
"MIT"
] | null | null | null | pkg/Python27/Lib/site-packages/envoy/core.py | jkolokotronis/ds_mod_tools | d9fd4def34f6adfd0e2b176d0a9bf2a3dfd43f93 | [
"MIT"
] | null | null | null | pkg/Python27/Lib/site-packages/envoy/core.py | jkolokotronis/ds_mod_tools | d9fd4def34f6adfd0e2b176d0a9bf2a3dfd43f93 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
envoy.core
~~~~~~~~~~
This module provides envoy's core primitives for spawning external commands and collecting their output.
"""
import os
import shlex
import subprocess
import threading
# Package metadata, kept at module level for runtime introspection.
__version__ = '0.0.2'
__license__ = 'MIT'
__author__ = 'Kenneth Reitz'
class Command(object):
    """One-shot runner for a single subprocess with an optional timeout.

    `run()` spawns the child on a worker thread so the calling thread can
    enforce the timeout; stdout/stderr/returncode are captured on `self`.
    """
    def __init__(self, cmd):
        self.cmd = cmd          # argv list (as produced by expand_args)
        self.process = None     # set once run() has spawned the child
        self.out = None         # captured stdout (text mode)
        self.err = None         # captured stderr (text mode)
        self.returncode = None  # child's exit status after run()
        self.data = None        # payload written to the child's stdin

    def run(self, data, timeout):
        """Execute the command, feeding `data` to stdin.

        Blocks up to `timeout` seconds (forever when None); on timeout the
        child is terminated. Returns an ``(out, err)`` tuple.
        """
        self.data = data
        def target():
            # Runs on the worker thread: spawn the child and wait for it
            # to finish while exchanging stdin/stdout/stderr.
            self.process = subprocess.Popen(self.cmd,
                                            universal_newlines=True,
                                            shell=False,
                                            env=os.environ,
                                            stdin=subprocess.PIPE,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            bufsize=0,
                                            )
            self.out, self.err = self.process.communicate(self.data)
        thread = threading.Thread(target=target)
        thread.start()
        thread.join(timeout)
        if thread.is_alive():
            # NOTE(review): if the timeout fires before Popen() has assigned
            #   self.process, terminate() is called on None -- a small race
            #   worth confirming/fixing upstream.
            self.process.terminate()
            thread.join()
        self.returncode = self.process.returncode
        return self.out, self.err
class ConnectedCommand(object):
    """A live handle to a spawned process (see `connect()` below).

    Wraps the underlying Popen object and exposes the stdio handles; also
    usable as a context manager that kills the process on exit.
    """

    def __init__(self,
            process=None,
            std_in=None,
            std_out=None,
            std_err=None):
        self._process = process
        # Exit code; populated by block(). Initialising it here fixes an
        # AttributeError in the status_code property, which used to read
        # the attribute before block() had ever set it.
        self._status_code = None
        self.std_in = std_in
        self.std_out = std_out
        # BUG FIX: this previously assigned `std_out`, silently dropping
        # the caller's stderr handle.
        self.std_err = std_err

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.kill()

    @property
    def status_code(self):
        """
        The status code of the process.
        If the code is None, assume that it's still running.
        """
        # None until block() records the child's exit status.
        return self._status_code

    @property
    def pid(self):
        """The process' PID."""
        return self._process.pid

    def kill(self):
        """Kills the process."""
        return self._process.kill()

    def expect(self, bytes, stream=None):
        """Block until given bytes appear in the stream.

        NOTE: not implemented yet; currently a no-op stub.
        """
        if stream is None:
            stream = self.std_out
        pass

    def send(self, end='\n'):
        """Sends a line to std_in.

        NOTE: not implemented yet; currently a no-op stub.
        """
        pass

    def block(self):
        """Blocks until command finishes. Returns Response instance."""
        self._status_code = self._process.wait()
class Response(object):
    """A command's response: stdout/stderr, exit status, and the Response
    objects of earlier pipeline stages (in `history`)."""

    def __init__(self, process=None):
        super(Response, self).__init__()
        self._process = process
        self.command = None      # argv list, assigned by run()
        self.std_err = None
        self.std_out = None
        self.status_code = None
        self.history = []        # Responses of earlier pipeline stages

    def __repr__(self):
        # BUG FIX: `command` starts out as None, so the original
        # `len(self.command)` raised TypeError when repr() was taken
        # before a command had been assigned. Truthiness covers both
        # the None and the empty-list case.
        if self.command:
            return '<Response [{0}]>'.format(self.command[0])
        else:
            return '<Response>'
def expand_args(command):
    """Parses command strings and returns a Popen-ready list.

    A string such as ``"cat f | wc -l"`` is first split on pipes, then
    each segment is tokenized shell-style; a list of command strings is
    tokenized directly. Always returns a list of argv lists.
    """
    # `basestring` only exists on Python 2; fall back to `str` so this
    # module also works under Python 3 (previously a NameError there).
    try:
        string_types = basestring
    except NameError:
        string_types = str

    # Prepare arguments.
    if isinstance(command, string_types):
        splitter = shlex.shlex(command, posix=True)
        splitter.whitespace = '|'
        splitter.whitespace_split = True
        command = []
        while True:
            token = splitter.get_token()
            if token:
                command.append(token)
            else:
                break
    # A list comprehension (not `map`) so the result is a real list on
    # both Python 2 and Python 3 (py3 `map` returns a lazy iterator).
    return [shlex.split(part) for part in command]
def run(command, data=None, timeout=None):
    """Executes a given command (optionally a ``|`` pipeline) and returns
    the Response of the final stage.
    Blocks until process is complete, or timeout is reached.
    """
    command = expand_args(command)
    history = []
    for c in command:
        if len(history):
            # Feed the previous stage's stdout into this stage's stdin.
            # Only the first 10 KiB is forwarded (the old comment claimed
            # "10MB", but the slice below is 10 * 1024 bytes) -- a guard
            # against broken-pipe problems.
            data = history[-1].std_out[0:10*1024]
        cmd = Command(c)
        out, err = cmd.run(data, timeout)
        # Record this stage's result so later stages and the caller can
        # inspect the whole pipeline.
        r = Response(process=cmd)
        r.command = c
        r.std_out = out
        r.std_err = err
        r.status_code = cmd.returncode
        history.append(r)
    # The final stage's Response carries all earlier stages as `history`.
    r = history.pop()
    r.history = history
    return r
def connect():
    # NOTE(review): dead code -- immediately shadowed by the richer
    # `connect(command, data=None)` definition below; kept only to avoid
    # changing the module layout.
    pass
def connect(command, data=None):
    """Spawns a new process from the given command and returns a
    ConnectedCommand handle to it.

    NOTE: unlike `run()`, pipelines are not supported here -- only the
    last ``|`` segment of `command` is executed.
    """
    # TODO: support piped commands
    command_str = expand_args(command).pop()
    process = subprocess.Popen(command_str,
                               universal_newlines=True,
                               shell=False,
                               env=os.environ,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               bufsize=0,
                               )
    # Cleanup: the original ended with an unreachable `return cmd`
    # (an undefined name) plus leftover commented-out experiments;
    # both have been removed.
    return ConnectedCommand(process=process)
| 22.581395 | 71 | 0.571782 |
ac60b1a69a7b17e6be475d977f82b408506884a1 | 1,397 | py | Python | setup.py | greenbender/django-gravy-bitfield | e43a0b5244455077d10f95a89e5f98eac7c57d6b | [
"Apache-2.0"
] | null | null | null | setup.py | greenbender/django-gravy-bitfield | e43a0b5244455077d10f95a89e5f98eac7c57d6b | [
"Apache-2.0"
] | null | null | null | setup.py | greenbender/django-gravy-bitfield | e43a0b5244455077d10f95a89e5f98eac7c57d6b | [
"Apache-2.0"
] | null | null | null | import os
from setuptools import setup, find_packages
# The long description shown on PyPI comes straight from the README.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.dirname(os.path.abspath(__file__)))

INSTALL_REQUIRES = [
    'Django>=1.8',
    'six',
]

EXTRAS_REQUIRE = {
    'tests': [
        'flake8',
        'mysqlclient',
        'psycopg2>=2.3',
        'pytest-django',
    ],
}

CLASSIFIERS = [
    'Framework :: Django',
    'Intended Audience :: Developers',
    'Intended Audience :: System Administrators',
    'Operating System :: OS Independent',
    'Topic :: Software Development',
    "Programming Language :: Python",
    "Programming Language :: Python :: 2",
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.4",
    "Programming Language :: Python :: 3.5",
]

setup(
    name='django-gravy-bitfield',
    version='1.9.1',
    packages=find_packages(),
    install_requires=INSTALL_REQUIRES,
    extras_require=EXTRAS_REQUIRE,
    include_package_data=True,
    license='Apache License',
    description='BitField in Django',
    long_description=README,
    author='DISQUS',
    author_email='opensource@disqus.com',
    url='https://github.com/disqus/django-bitfield',
    zip_safe=False,
    classifiers=CLASSIFIERS,
)
| 27.392157 | 78 | 0.598425 |
e4030b4aea2f04ae8e4fc24f8e8f32ca53363ccd | 27 | py | Python | src/staticlocal/__init__.py | billsix/staticlocal | fe99ec9008a0c248830b17551ffd9ae1e4a0ca3e | [
"MIT"
] | null | null | null | src/staticlocal/__init__.py | billsix/staticlocal | fe99ec9008a0c248830b17551ffd9ae1e4a0ca3e | [
"MIT"
] | null | null | null | src/staticlocal/__init__.py | billsix/staticlocal | fe99ec9008a0c248830b17551ffd9ae1e4a0ca3e | [
"MIT"
] | null | null | null | from .staticlocal import *
| 13.5 | 26 | 0.777778 |
495784a69d05e7768b7c5312feef90aac85ea3e4 | 2,307 | py | Python | tests/test_io.py | jwcarr/eyek | 78375e0a3a28610bfbcd2d17345d8336374ca5d8 | [
"MIT"
] | 10 | 2020-11-21T19:36:03.000Z | 2022-03-10T12:37:56.000Z | tests/test_io.py | jwcarr/eyek | 78375e0a3a28610bfbcd2d17345d8336374ca5d8 | [
"MIT"
] | 1 | 2020-10-21T16:40:38.000Z | 2021-01-10T19:17:08.000Z | tests/test_io.py | jwcarr/eyek | 78375e0a3a28610bfbcd2d17345d8336374ca5d8 | [
"MIT"
] | 3 | 2021-02-02T18:37:38.000Z | 2021-11-12T08:51:27.000Z | from tempfile import TemporaryDirectory
from pathlib import Path
import eyekit
# Fixture files shipped in the repository's example/ directory. The .asc
# and .csv files may be absent; the tests below skip silently in that case.
EXAMPLE_DATA = Path("example") / "example_data.json"
EXAMPLE_TEXTS = Path("example") / "example_texts.json"
EXAMPLE_ASC = Path("example") / "example_data.asc"
EXAMPLE_CSV = Path("example") / "example_data.csv"
def test_load_data():
    """eyekit.io.load() should rebuild FixationSequence objects from the JSON fixture."""
    loaded = eyekit.io.load(EXAMPLE_DATA)
    for trial in ("trial_0", "trial_1", "trial_2"):
        assert isinstance(loaded[trial]["fixations"], eyekit.FixationSequence)
    first_trial = loaded["trial_0"]["fixations"]
    assert first_trial[0].x == 412
    assert first_trial[1].y == 163
    assert first_trial[2].duration == 333
def test_load_texts():
    """eyekit.io.load() should rebuild TextBlock objects with their layout attributes."""
    loaded = eyekit.io.load(EXAMPLE_TEXTS)
    for passage in ("passage_a", "passage_b", "passage_c"):
        assert isinstance(loaded[passage]["text"], eyekit.TextBlock)
    assert loaded["passage_a"]["text"].position == (360, 161)
    assert loaded["passage_b"]["text"].font_face == "Courier New"
    assert loaded["passage_c"]["text"].align == "left"
    assert loaded["passage_c"]["text"].anchor == "left"
def test_save():
    """Round-trip: data written with eyekit.io.save() reloads with identical fixations."""
    data = eyekit.io.load(EXAMPLE_DATA)
    with TemporaryDirectory() as temp_dir:
        output_file = Path(temp_dir) / "output.json"
        eyekit.io.save(data, output_file)
        written_data = eyekit.io.load(output_file)
        original_seq = data["trial_0"]["fixations"]
        written_seq = written_data["trial_0"]["fixations"]
        # Compare fixation-by-fixation via the serialized representation.
        for fxn1, fxn2 in zip(original_seq, written_seq):
            assert fxn1.serialize() == fxn2.serialize()
def test_import_asc():
    """Spot-check values imported from an EyeLink ASC fixture."""
    try:
        data = eyekit.io.import_asc(EXAMPLE_ASC, variables=["trial_type"])
    except FileNotFoundError:
        # The ASC fixture is not shipped in every checkout; treat as a no-op.
        return
    assert data[0]["trial_type"] == "Practice"
    assert data[1]["fixations"].duration == 72279
    assert data[2]["fixations"][0].x == 1236
def test_import_csv():
    """Spot-check per-trial fixation durations imported from a CSV fixture."""
    try:
        data = eyekit.io.import_csv(EXAMPLE_CSV, trial_header="trial")
    except FileNotFoundError:
        # The CSV fixture is not shipped in every checkout; treat as a no-op.
        return
    assert data[0]["fixations"].duration == 78505
    assert data[1]["fixations"].duration == 60855
    assert data[2]["fixations"].duration == 57468
| 37.209677 | 76 | 0.686173 |
f0042c498192140ec8b725cc5caffb7194603c6a | 4,810 | py | Python | pydlbot_ui - Copy.py | iarwain8a/pydlbot | 3d471795da3f5f17f2e784a9fa508f9dd5613bd3 | [
"MIT"
] | null | null | null | pydlbot_ui - Copy.py | iarwain8a/pydlbot | 3d471795da3f5f17f2e784a9fa508f9dd5613bd3 | [
"MIT"
] | null | null | null | pydlbot_ui - Copy.py | iarwain8a/pydlbot | 3d471795da3f5f17f2e784a9fa508f9dd5613bd3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pydlbot.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Qt-Designer-generated UI for the Pydlbot main window, with hand-added slots."""
    # Index of the most recently filled row in listWidget; one item already
    # exists after setupUi, so the first puta() call fills row 1.
    cont = 0
    def setupUi(self, MainWindow):
        """Build the widget tree for MainWindow and connect signals to slots."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(755, 459)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        # Top row: text entry plus "Add" button.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setSpacing(6)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.addtext = QtWidgets.QLineEdit(self.centralwidget)
        self.addtext.setObjectName("addtext")
        self.horizontalLayout.addWidget(self.addtext)
        self.add = QtWidgets.QPushButton(self.centralwidget)
        self.add.setObjectName("add")
        self.horizontalLayout.addWidget(self.add)
        self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 2)
        # Bottom-right column: "Quit" button.
        self.verticalLayout_3 = QtWidgets.QVBoxLayout()
        self.verticalLayout_3.setContentsMargins(-1, 100, -1, -1)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_4.setObjectName("pushButton_4")
        self.verticalLayout_3.addWidget(self.pushButton_4)
        self.gridLayout.addLayout(self.verticalLayout_3, 2, 1, 1, 1)
        # Right column: "Edit" / "Download now" / "Delete" buttons.
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setContentsMargins(-1, -1, -1, 70)
        self.verticalLayout_2.setSpacing(20)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setObjectName("pushButton_2")
        self.verticalLayout_2.addWidget(self.pushButton_2)
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setObjectName("pushButton_3")
        self.verticalLayout_2.addWidget(self.pushButton_3)
        self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_5.setObjectName("pushButton_5")
        self.verticalLayout_2.addWidget(self.pushButton_5)
        self.gridLayout.addLayout(self.verticalLayout_2, 1, 1, 1, 1)
        # Main area: list of queued downloads.
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.listWidget = QtWidgets.QListWidget(self.centralwidget)
        self.listWidget.setObjectName("listWidget")
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        self.horizontalLayout_3.addWidget(self.listWidget)
        self.gridLayout.addLayout(self.horizontalLayout_3, 1, 0, 2, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 755, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.addtext.returnPressed.connect(self.listWidget.reset)
        self.pushButton_5.pressed.connect(self.listWidget.clearSelection)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        self.pushButton_3.clicked.connect(self.puta)
        #self.addtext.returnPressed.connect()
    def keyPressEvent(self, e):
        """Close the window when Escape is pressed."""
        # Bug fix: the original referenced the bare name `Qt`, which is never
        # imported (only QtCore/QtGui/QtWidgets are), so every key press raised
        # NameError. The key-code enum lives under QtCore.Qt.
        if e.key() == QtCore.Qt.Key_Escape:
            # NOTE(review): Ui_MainWindow itself defines no close(); this
            # handler presumably belongs on the QMainWindow subclass — verify.
            self.close()
    def puta(self):
        """Append a new row to listWidget containing the current addtext text."""
        self.cont = self.cont + 1
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = self.listWidget.item(self.cont)
        text = self.addtext.text()
        item.setText(text)
    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (pyuic-generated translation hook)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Pydlbot"))
        self.add.setText(_translate("MainWindow", "Add"))
        self.pushButton_4.setText(_translate("MainWindow", "Quit"))
        self.pushButton_2.setText(_translate("MainWindow", "Edit"))
        self.pushButton_3.setText(_translate("MainWindow", "Download now"))
        self.pushButton_5.setText(_translate("MainWindow", "Delete"))
        __sortingEnabled = self.listWidget.isSortingEnabled()
        self.listWidget.setSortingEnabled(False)
        item = self.listWidget.item(0)
        item.setText(_translate("MainWindow", "flash"))
        self.listWidget.setSortingEnabled(__sortingEnabled)
| 47.623762 | 75 | 0.706445 |
4eea5513c721706412c5c576bac1bc5b7f062779 | 6,452 | py | Python | hashgraph.py | prakashcc/HashPay | 437324e39fa71494324d1a9718e252314d76735d | [
"MIT"
] | null | null | null | hashgraph.py | prakashcc/HashPay | 437324e39fa71494324d1a9718e252314d76735d | [
"MIT"
] | null | null | null | hashgraph.py | prakashcc/HashPay | 437324e39fa71494324d1a9718e252314d76735d | [
"MIT"
] | 1 | 2019-01-04T16:04:04.000Z | 2019-01-04T16:04:04.000Z | # -*- coding: utf-8 -*-
from random import shuffle
import sys
from base64 import b64encode
from time import localtime, strftime
from bokeh.io import curdoc
from bokeh.layouts import layout, widgetbox, row
from bokeh.plotting import figure
from bokeh.palettes import plasma, small_palettes
from bokeh.models import (
FixedTicker, Button, ColumnDataSource, PanTool, Scroll,
RadioButtonGroup, RadioGroup, Arrow, NormalHead, HoverTool)
from pysodium import crypto_sign_keypair
from utils import bfs, randrange
from swirld import Node
# Nine green shades, shuffled once at import so consecutive rounds look distinct.
R_COLORS = small_palettes['Greens'][9]
shuffle(R_COLORS)
def round_color(r):
    # Cycle through the 9 shuffled shades by round number.
    return R_COLORS[r % 9]
# 256-colour plasma palette used once an event's consensus index is decided.
I_COLORS = plasma(256)
def idx_color(r):
    # Cycle through the 256 plasma colours by index.
    return I_COLORS[r % 256]
class App:
    """Bokeh app visualising a simulated hashgraph: one x-column per node,
    events drawn as circles coloured by round (then by consensus index)."""
    def __init__(self, n_nodes):
        """Create n_nodes simulated swirld Nodes and build the Bokeh document."""
        self.i = 0  # running count of processed events (used for logging only)
        kps = [crypto_sign_keypair() for _ in range(n_nodes)]
        # Every node gets an equal stake of 1.
        stake = {kp[0]: 1 for kp in kps}
        network = {}
        self.nodes = [Node(kp, network, n_nodes, stake) for kp in kps]
        # Nodes reach each other through the shared `network` dict (pk -> ask_sync).
        for n in self.nodes:
            network[n.pk] = n.ask_sync
        self.ids = {kp[0]: i for i, kp in enumerate(kps)}  # public key -> plot column
        # One main() generator per node; prime each so it is ready to run.
        self.main_its = [n.main() for n in self.nodes]
        for m in self.main_its:
            next(m)
        def toggle():
            # Play/pause: (un)register the periodic animate callback (50 ms).
            if play.label == '► Play':
                play.label = '❚❚ Pause'
                curdoc().add_periodic_callback(self.animate, 50)
            else:
                play.label = '► Play'
                curdoc().remove_periodic_callback(self.animate)
        play = Button(label='► Play', width=60)
        play.on_click(toggle)
        def sel_node(new):
            # Rebuild the whole plot from the point of view of node `new`.
            self.active = new
            node = self.nodes[new]
            self.tbd = {}  # events whose fame/index may still change -> glyph row
            self.tr_src.data, self.links_src.data = self.extract_data(
                node, bfs((node.head,), lambda u: node.hg[u].p), 0)
            for u, j in tuple(self.tbd.items()):
                self.tr_src.data['line_alpha'][j] = 1 if node.famous.get(u) else 0
                if u in node.idx:
                    self.tr_src.data['round_color'][j] = idx_color(node.idx[u])
                self.tr_src.data['idx'][j] = node.idx.get(u)
                # Both fame and index decided: nothing left to update for u.
                if u in node.idx and u in node.famous:
                    del self.tbd[u]
                    print('updated')
            self.tr_src.trigger('data', None, self.tr_src.data)
        selector = RadioButtonGroup(
            labels=['Node %i' % i for i in range(n_nodes)], active=0,
            name='Node to inspect')
        selector.on_click(sel_node)
        plot = figure(
            plot_height=700, plot_width=900, y_range=(0, 30),
            tools=[PanTool(),
                   HoverTool(tooltips=[
                       ('round', '@round'), ('hash', '@hash'),
                       ('timestamp', '@time'), ('payload', '@payload'),
                       ('number', '@idx')])])
        plot.xgrid.grid_line_color = None
        plot.xaxis.minor_tick_line_color = None
        plot.ygrid.grid_line_color = None
        plot.yaxis.minor_tick_line_color = None
        # Parent links between events (width 3 = first parent, 1 = second).
        self.links_src = ColumnDataSource(data={'x0': [], 'y0': [], 'x1': [],
                                                'y1': [], 'width': []})
        #self.links_rend = plot.add_layout(
        #    Arrow(end=NormalHead(fill_color='black'), x_start='x0', y_start='y0', x_end='x1',
        #          y_end='y1', source=self.links_src))
        self.links_rend = plot.segment(color='#777777',
                x0='x0', y0='y0', x1='x1',
                y1='y1', source=self.links_src, line_width='width')
        # One circle per event; fill encodes round (or consensus index once
        # decided), outline alpha encodes fame.
        self.tr_src = ColumnDataSource(
                data={'x': [], 'y': [], 'round_color': [], 'idx': [],
                      'line_alpha': [], 'round': [], 'hash': [], 'payload': [],
                      'time': []})
        self.tr_rend = plot.circle(x='x', y='y', size=20, color='round_color',
                                   line_alpha='line_alpha', source=self.tr_src, line_width=5)
        sel_node(0)
        curdoc().add_root(row([widgetbox(play, selector, width=300), plot], sizing_mode='fixed'))
    def extract_data(self, node, trs, i):
        """Convert events `trs` (as seen by `node`) into glyph/link columns;
        `i` is the row offset of the first new glyph in tr_src."""
        tr_data = {'x': [], 'y': [], 'round_color': [], 'idx': [],
                   'line_alpha': [], 'round': [], 'hash': [], 'payload': [],
                   'time': []}
        links_data = {'x0': [], 'y0': [], 'x1': [], 'y1': [], 'width': []}
        for j, u in enumerate(trs):
            # Remember the glyph row so fame/index can be filled in later.
            self.tbd[u] = i + j
            ev = node.hg[u]
            x = self.ids[ev.c]
            y = node.height[u]
            tr_data['x'].append(x)
            tr_data['y'].append(y)
            tr_data['round_color'].append(round_color(node.round[u]))
            tr_data['round'].append(node.round[u])
            tr_data['hash'].append(b64encode(u).decode('utf8'))
            tr_data['payload'].append(ev.d)
            tr_data['time'].append(strftime("%Y-%m-%d %H:%M:%S", localtime(ev.t)))
            tr_data['idx'].append(None)
            tr_data['line_alpha'].append(None)
            if ev.p:
                # Two parent links from (x, y): widths 3 and 1 respectively.
                links_data['x0'].extend((x, x))
                links_data['y0'].extend((y, y))
                links_data['x1'].append(self.ids[node.hg[ev.p[0]].c])
                links_data['x1'].append(self.ids[node.hg[ev.p[1]].c])
                links_data['y1'].append(node.height[ev.p[0]])
                links_data['y1'].append(node.height[ev.p[1]])
                links_data['width'].extend((3, 1))
        return tr_data, links_data
    def animate(self):
        """Periodic callback: advance one random node's main loop and, if it is
        the inspected node, stream its new events into the plot."""
        r = randrange(len(self.main_its))
        print('working node: %i, event number: %i' % (r, self.i))
        self.i += 1
        new = next(self.main_its[r])
        if r == self.active:
            tr, links = self.extract_data(self.nodes[r], new, len(self.tr_src.data['x']))
            self.tr_src.stream(tr)
            self.links_src.stream(links)
            for u, j in tuple(self.tbd.items()):
                self.tr_src.data['line_alpha'][j] = 1 if self.nodes[r].famous.get(u) else 0
                if u in self.nodes[r].idx:
                    self.tr_src.data['round_color'][j] = idx_color(self.nodes[r].idx[u])
                self.tr_src.data['idx'][j] = self.nodes[r].idx.get(u)
                if u in self.nodes[r].idx and u in self.nodes[r].famous:
                    del self.tbd[u]
                    print('updated')
            self.tr_src.trigger('data', None, self.tr_src.data)
# Entry point: node count comes from the command line. Runs at import time,
# which is how a Bokeh app script is executed by the server.
App(int(sys.argv[1]))
| 39.10303 | 98 | 0.522164 |
39d9229198f1d1cfbef78c0352541def56c7d3c1 | 708 | py | Python | migrations/versions/f98defc5cf6a_add_column_picture_path_in_users_for_.py | Nyota254/the-pitch | b3015c8ebc62a91085c39cf23e2feab0bd9b3b89 | [
"MIT"
] | null | null | null | migrations/versions/f98defc5cf6a_add_column_picture_path_in_users_for_.py | Nyota254/the-pitch | b3015c8ebc62a91085c39cf23e2feab0bd9b3b89 | [
"MIT"
] | null | null | null | migrations/versions/f98defc5cf6a_add_column_picture_path_in_users_for_.py | Nyota254/the-pitch | b3015c8ebc62a91085c39cf23e2feab0bd9b3b89 | [
"MIT"
] | null | null | null | """add column picture path in Users for profile picture
Revision ID: f98defc5cf6a
Revises: 2eb7d837048f
Create Date: 2019-09-16 14:24:51.233455
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f98defc5cf6a'  # this migration's Alembic identifier
down_revision = '2eb7d837048f'  # migration applied immediately before this one
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable users.profile_pic_path column (profile picture path)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('users', sa.Column('profile_pic_path', sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop users.profile_pic_path, reverting upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'profile_pic_path')
    # ### end Alembic commands ###
| 24.413793 | 85 | 0.704802 |
ba756816c8a43e99dee2e3368a56a9de4d2fab78 | 4,491 | py | Python | hyppo/kgof/data.py | zdbzdb123123/hyppo | c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde | [
"MIT"
] | 116 | 2020-02-28T10:29:22.000Z | 2022-03-22T12:19:39.000Z | hyppo/kgof/data.py | zdbzdb123123/hyppo | c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde | [
"MIT"
] | 253 | 2020-02-17T16:18:56.000Z | 2022-03-30T16:55:02.000Z | hyppo/kgof/data.py | zdbzdb123123/hyppo | c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde | [
"MIT"
] | 27 | 2020-03-02T21:07:41.000Z | 2022-03-08T08:33:23.000Z | """
Module containing data structures for representing datasets.
Contains overlapping functionality with sims that exist in hyppo.tools.
Module will be refactored to remove dependencies on this object.
"""
from __future__ import print_function, division
from builtins import range, object
from past.utils import old_div
from abc import ABC, abstractmethod
import autograd.numpy as np
from ._utils import tr_te_indices
import scipy.stats as stats
from numpy.random import default_rng
class Data(object):
    """Encapsulation of an n x d data matrix whose rows are samples drawn
    from some distribution."""
    def __init__(self, X):
        """
        :param X: n x d numpy array for dataset X
        """
        self.X = X
    def dim(self):
        """Return d, the dimensionality of the data."""
        return self.X.shape[1]
    def sample_size(self):
        """Return n, the number of rows."""
        return self.X.shape[0]
    def n(self):
        """Alias for sample_size()."""
        return self.sample_size()
    def data(self):
        """Return the underlying data matrix."""
        return self.X
    def split_tr_te(self, tr_proportion=0.5, seed=820, return_tr_ind=False):
        """Split into (training Data, test Data); optionally also return the
        training-row indices."""
        rows, _ = self.X.shape
        Itr, Ite = tr_te_indices(rows, tr_proportion, seed)
        tr_data = Data(self.X[Itr, :])
        te_data = Data(self.X[Ite, :])
        if return_tr_ind:
            return (tr_data, te_data, Itr)
        return (tr_data, te_data)
    def subsample(self, n, seed=87, return_ind=False):
        """Return a new Data of n rows drawn without replacement."""
        if n > self.X.shape[0]:
            raise ValueError("n should not be larger than sizes of X")
        rng = default_rng(seed)
        ind_x = rng.choice(self.X.shape[0], n, replace=False)
        picked = Data(self.X[ind_x, :])
        if return_ind:
            return picked, ind_x
        return picked
    def clone(self):
        """Return a new Data holding an independent copy of the matrix."""
        return Data(np.copy(self.X))
    def __add__(self, data2):
        """Return a new Data whose rows are this dataset's rows followed by
        data2's rows; both inputs are copied."""
        left = self.clone()
        right = data2.clone()
        return Data(np.vstack((left.X, right.X)))
class DataSource(ABC):
    """Abstract source of data supporting repeatable (seeded) resampling.
    Concrete subclasses may prefix their class names with DS."""
    @abstractmethod
    def sample(self, n, seed):
        """Return a Data of n samples; must be deterministic for a fixed
        (n, seed) pair."""
        raise NotImplementedError()
    def dim(self):
        """Return the data dimension by drawing one sample. Subclasses should
        override when a cheaper answer exists, since sampling can be costly
        (e.g. MCMC-based sources)."""
        return self.sample(n=1, seed=3).dim()
class DSIsotropicNormal(DataSource):
    """DataSource drawing i.i.d. samples from an isotropic multivariate normal."""
    def __init__(self, mean, variance):
        """
        mean: length-d numpy array giving the distribution mean
        variance: positive float, the common per-coordinate variance
        """
        assert len(mean.shape) == 1
        self.mean = mean
        self.variance = variance
    def sample(self, n, seed=2):
        """Return a Data of n draws; deterministic for a fixed (n, seed)."""
        rng = default_rng(seed)
        dims = len(self.mean)
        noise = rng.standard_normal(size=(n, dims))
        X = noise * np.sqrt(self.variance) + self.mean
        return Data(X)
class DSNormal(DataSource):
    """
    A DataSource implementing a multivariate Gaussian.
    """
    def __init__(self, mean, cov):
        """
        mean: a numpy array of length d.
        cov: d x d numpy array for the covariance.
        """
        self.mean = mean
        self.cov = cov
        assert mean.shape[0] == cov.shape[0]
        assert cov.shape[0] == cov.shape[1]
    def sample(self, n, seed=3):
        """Return a Data of n draws from N(mean, cov), deterministic per seed."""
        rng = default_rng(seed)
        mvn = stats.multivariate_normal(self.mean, self.cov)
        # Bug fix: rng was previously created but never passed to rvs(), so
        # `seed` was ignored and sampling was non-reproducible, violating the
        # DataSource.sample contract. random_state restores determinism.
        X = mvn.rvs(size=n, random_state=rng)
        if len(X.shape) == 1:
            # This can happen if d=1
            X = X[:, np.newaxis]
        return Data(X)
| 28.605096 | 76 | 0.598085 |
00311eac42692ad013fb815547595a18d3a99999 | 764 | py | Python | model/formulas/__init__.py | thieu1995/IFCB | 4a5936f93e4e317915dfcd12682829cf20a39552 | [
"MIT"
] | 4 | 2021-02-05T13:45:23.000Z | 2022-03-09T05:44:58.000Z | model/formulas/__init__.py | bkc-group/IFCB | 1ada1151eb057510c16b0ed66b980b736603a0e5 | [
"MIT"
] | null | null | null | model/formulas/__init__.py | bkc-group/IFCB | 1ada1151eb057510c16b0ed66b980b736603a0e5 | [
"MIT"
] | 2 | 2021-02-15T14:55:22.000Z | 2021-03-13T08:48:17.000Z | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 15:47, 06/01/2021 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
| 84.888889 | 105 | 0.197644 |
549df2c3702bdfde163db777fdf555e0acec323a | 1,146 | py | Python | leadgen_viewer/contrib/sites/migrations/0002_set_site_domain_and_name.py | sujitmaharjan/mytestrepo | 5a38550317120f6639e5a4e825372502f6a6312b | [
"MIT"
] | null | null | null | leadgen_viewer/contrib/sites/migrations/0002_set_site_domain_and_name.py | sujitmaharjan/mytestrepo | 5a38550317120f6639e5a4e825372502f6a6312b | [
"MIT"
] | null | null | null | leadgen_viewer/contrib/sites/migrations/0002_set_site_domain_and_name.py | sujitmaharjan/mytestrepo | 5a38550317120f6639e5a4e825372502f6a6312b | [
"MIT"
] | null | null | null | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.org/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
    """Set site domain and name."""
    # Fetch the historical model via `apps` (not a direct import) so the
    # migration keeps working if the Site model changes in later versions.
    Site = apps.get_model("sites", "Site")
    Site.objects.update_or_create(
        id=settings.SITE_ID,
        defaults={
            "domain": "leadgen.sujit.net.np",
            "name": "leadgen_viewer"
        }
    )
def update_site_backward(apps, schema_editor):
    """Revert site domain and name to default."""
    # Mirror of update_site_forward: restores Django's example.com defaults.
    Site = apps.get_model("sites", "Site")
    Site.objects.update_or_create(
        id=settings.SITE_ID,
        defaults={
            "domain": "example.com",
            "name": "example.com"
        }
    )
class Migration(migrations.Migration):
    # Runs after the initial django.contrib.sites migration.
    dependencies = [
        ('sites', '0001_initial'),
    ]
    operations = [
        # Reversible data migration: forward sets this project's site record,
        # backward restores the example.com default.
        migrations.RunPython(update_site_forward, update_site_backward),
    ]
| 24.382979 | 130 | 0.643979 |
539ae3ce09506220cd980048185ff686ffb30c8f | 67,585 | py | Python | docs.py | bdmbdsm/openprocurement.auctions.geb | 7d4db2c0c66fbe896dc76cead44bbb0f701c2353 | [
"Apache-2.0"
] | null | null | null | docs.py | bdmbdsm/openprocurement.auctions.geb | 7d4db2c0c66fbe896dc76cead44bbb0f701c2353 | [
"Apache-2.0"
] | null | null | null | docs.py | bdmbdsm/openprocurement.auctions.geb | 7d4db2c0c66fbe896dc76cead44bbb0f701c2353 | [
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import os
from datetime import timedelta, datetime
from openprocurement.auctions.core.utils import get_now
from openprocurement.auctions.core.tests.base import PrefixedRequestClass
import openprocurement.auctions.geb.tests.base as base_test
from openprocurement.auctions.geb.tests.base import test_auction_data as base_test_auction_data, test_bids
from openprocurement.auctions.geb.tests.base import test_auction_maximum_data
from openprocurement.auctions.geb.tests.tender import BaseAuctionWebTest
from webtest import TestApp
# Frozen at import time; auctionPeriod dates below are built relative to it.
now = datetime.now()
test_auction_data = base_test_auction_data.copy()
# First bidder fixture used throughout the generated documentation.
bid = {
    "data": {
        "tenderers": [
            {
                "address": {
                    "countryName": "Україна",
                    "locality": "м. Вінниця",
                    "postalCode": "21100",
                    "region": "м. Вінниця",
                    "streetAddress": "вул. Островського, 33"
                },
                "contactPoint": {
                    "email": "soleksuk@gmail.com",
                    "name": "Сергій Олексюк",
                    "telephone": "+380 (432) 21-69-30"
                },
                "identifier": {
                    "scheme": u"UA-EDR",
                    "id": u"00137256",
                    "uri": u"http://www.sc.gov.ua/"
                },
                "name": "ДКП «Школяр»"
            }
        ],
        "status": "draft",
        "qualified": True,
        "value": {
            "amount": 500
        }
    }
}
# Second bidder fixture with a higher bid value.
bid2 = {
    "data": {
        "tenderers": [
            {
                "address": {
                    "countryName": "Україна",
                    "locality": "м. Львів",
                    "postalCode": "79013",
                    "region": "м. Львів",
                    "streetAddress": "вул. Островського, 34"
                },
                "contactPoint": {
                    "email": "aagt@gmail.com",
                    "name": "Андрій Олексюк",
                    "telephone": "+380 (322) 91-69-30"
                },
                "identifier": {
                    "scheme": u"UA-EDR",
                    "id": u"00137226",
                    "uri": u"http://www.sc.gov.ua/"
                },
                "name": "ДКП «Книга»"
            }
        ],
        "qualified": True,
        "value": {
            "amount": 501
        }
    }
}
# Question submitted by a supplier about the auction.
question = {
    "data": {
        "author": {
            "address": {
                "countryName": "Україна",
                "locality": "м. Вінниця",
                "postalCode": "21100",
                "region": "м. Вінниця",
                "streetAddress": "вул. Островського, 33"
            },
            "contactPoint": {
                "email": "soleksuk@gmail.com",
                "name": "Сергій Олексюк",
                "telephone": "+380 (432) 21-69-30"
            },
            "identifier": {
                "id": "00137226",
                "legalName": "Державне комунальне підприємство громадського харчування «Школяр»",
                "scheme": "UA-EDR",
                "uri": "http://sch10.edu.vn.ua/"
            },
            "name": "ДКП «Школяр»"
        },
        "description": "Просимо додати таблицю потрібної калорійності харчування",
        "title": "Калорійність"
    }
}
# Organizer's answer to the question above.
answer = {
    "data": {
        "answer": "Таблицю додано в файлі \"Kalorijnist.xslx\""
    }
}
cancellation = {
    'data': {
        'reason': 'cancellation reason'
    }
}
# Complaint fixture authored by the first bidder's tenderer.
test_complaint_data = {'data':
                       {
                           'title': 'complaint title',
                           'description': 'complaint description',
                           'author': bid["data"]["tenderers"][0]
                       }}
class DumpsTestAppwebtest(TestApp):
    """TestApp that, while ``self.file_obj`` is an open file, dumps every HTTP
    request and response to it — this is how the tutorial .http snippets
    under docs/source are generated."""
    def do_request(self, req, status=None, expect_errors=None):
        # Present a stable public hostname in the dumped requests.
        req.headers.environ["HTTP_HOST"] = "api-sandbox.ea.openprocurement.org"
        if hasattr(self, 'file_obj') and not self.file_obj.closed:
            self.file_obj.write(req.as_bytes(True))
            self.file_obj.write("\n")
            if req.body:
                try:
                    # Pretty-print the JSON body; best effort — non-JSON
                    # bodies are silently skipped.
                    self.file_obj.write(
                        '\n' +
                        json.dumps(
                            json.loads(
                                req.body),
                            indent=2,
                            ensure_ascii=False).encode('utf8'))
                    self.file_obj.write("\n")
                except Exception:
                    pass
            self.file_obj.write("\n")
        resp = super(
            DumpsTestAppwebtest,
            self).do_request(
            req,
            status=status,
            expect_errors=expect_errors)
        if hasattr(self, 'file_obj') and not self.file_obj.closed:
            # Dump status plus sorted headers, excluding content-length (it
            # would not match the pretty-printed body written below).
            headers = sorted([(n.title(), v)
                              for n, v in resp.headerlist
                              if n.lower() != 'content-length'])
            self.file_obj.write(str('\n%s\n%s\n') % (
                resp.status,
                str('\n').join([str('%s: %s') % (n, v) for n, v in headers]),
            ))
            if resp.testbody:
                try:
                    # Pretty-print the JSON response body, best effort.
                    self.file_obj.write(
                        '\n' +
                        json.dumps(
                            json.loads(
                                resp.testbody),
                            indent=2,
                            ensure_ascii=False).encode('utf8'))
                except Exception:
                    pass
            self.file_obj.write("\n\n")
        return resp
class AuctionResourceTest(BaseAuctionWebTest):
initial_data = test_auction_data
initial_bids = test_bids
docservice = True
    def setUp(self):
        """Create the docs-dumping test app and wire it to the test registry."""
        self.app = DumpsTestAppwebtest(
            "config:tests.ini",
            relative_to=os.path.dirname(
                base_test.__file__))
        self.app.RequestClass = PrefixedRequestClass
        self.app.authorization = ('Basic', ('broker', ''))
        self.couchdb_server = self.app.app.registry.couchdb_server
        self.db = self.app.app.registry.db
        if self.docservice:
            self.setUpDS()
            # Point generated docs at the public sandbox document service.
            self.app.app.registry.docservice_url = 'http://public.docs-sandbox.ea.openprocurement.org'
    def generate_docservice_url(self):
        """Rewrite docservice URLs to the public sandbox host for the docs."""
        return super(AuctionResourceTest, self).generate_docservice_url().replace(
            '/localhost/', '/public.docs-sandbox.ea.openprocurement.org/')
    def test_docs_acceleration(self):
        """Generate the accelerated-auction creation snippet (sandbox mode)."""
        # SANDBOX_MODE=TRUE
        data = test_auction_data.copy()
        # accelerator=1440 compresses auction periods for sandbox testing.
        data['procurementMethodDetails'] = 'quick, accelerator=1440'
        data['submissionMethodDetails'] = 'quick'
        data['mode'] = 'test'
        data["auctionPeriod"] = {
            "startDate": (now + timedelta(days=12)).isoformat()
        }
        with open('docs/source/tutorial/auction-post-acceleration.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions?opt_pretty=1', {"data": data})
        self.assertEqual(response.status, '201 Created')
        auction = response.json['data']
        self.auction_id = auction['id']
    def test_docs_2pc(self):
        """Generate two-phase-commit snippets: create as draft, then activate."""
        # Creating auction in draft status
        #
        data = test_auction_data.copy()
        data['status'] = 'draft'
        with open('docs/source/tutorial/auction-post-2pc.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions?opt_pretty=1', {"data": data})
            self.assertEqual(response.status, '201 Created')
            auction = response.json['data']
            self.auction_id = auction['id']
            owner_token = response.json['access']['token']
        # switch to 'active.tendering'
        with open('docs/source/tutorial/auction-patch-2pc.http', 'w') as self.app.file_obj:
            response = self.app.patch_json(
                '/auctions/{}?acc_token={}'.format(
                    auction['id'], owner_token), {
                    'data': {
                        "status": 'active.tendering'}})
            self.assertEqual(response.status, '200 OK')
    def test_docs_tutorial(self):
        """Drive the full happy-path auction lifecycle and dump every HTTP
        exchange into docs/source/tutorial/*.http (and a few
        docs/source/qualification/*.http) fixture files.

        Stages, in order: listing and failed-creation attempts, auction
        creation, modification, document uploads, enquiries, bidding,
        auction stage, qualification, contract signing and cancellation.
        The statement order is significant: later requests reuse ids and
        tokens captured from earlier responses.
        """
        request_path = '/auctions?opt_pretty=1'
        # Exploring basic rules
        #
        with open('docs/source/tutorial/auction-listing.http', 'w') as self.app.file_obj:
            self.app.authorization = ('Basic', ('broker', ''))
            response = self.app.get('/auctions')
            self.assertEqual(response.status, '200 OK')
            self.app.file_obj.write("\n")
        # POSTing a non-JSON body is rejected with 415.
        with open('docs/source/tutorial/auction-post-attempt.http', 'w') as self.app.file_obj:
            response = self.app.post(request_path, 'data', status=415)
            self.assertEqual(response.status, '415 Unsupported Media Type')
        self.app.authorization = ('Basic', ('broker', ''))
        # A JSON content type with an invalid body is rejected with 422.
        with open('docs/source/tutorial/auction-post-attempt-json.http', 'w') as self.app.file_obj:
            self.app.authorization = ('Basic', ('broker', ''))
            response = self.app.post(
                request_path,
                'data',
                content_type='application/json',
                status=422)
            self.assertEqual(response.status, '422 Unprocessable Entity')
        # Creating auction
        #
        with open('docs/source/tutorial/auction-post-attempt-json-data.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions?opt_pretty=1', {"data": test_auction_data})
            self.assertEqual(response.status, '201 Created')
            auction = response.json['data']
            owner_token = response.json['access']['token']
        # A too-near auctionPeriod start date fails validation.
        data = test_auction_data.copy()
        data["auctionPeriod"] = {
            "startDate": (now + timedelta(days=6)).isoformat()
        }
        with open('docs/source/tutorial/tenderperiod-validation-error.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions?opt_pretty=1', {"data": data}, status=422)
            self.assertEqual(response.status, '422 Unprocessable Entity')
        with open('docs/source/tutorial/blank-auction-view.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions/{}'.format(auction['id']))
            self.assertEqual(response.status, '200 OK')
        self.app.get('/auctions')
        with open('docs/source/tutorial/initial-auction-listing.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions')
            self.assertEqual(response.status, '200 OK')
        with open('docs/source/tutorial/create-auction-procuringEntity.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions?opt_pretty=1', {"data": test_auction_maximum_data})
            self.assertEqual(response.status, '201 Created')
        response = self.app.post_json(
            '/auctions?opt_pretty=1', {"data": test_auction_data})
        self.assertEqual(response.status, '201 Created')
        with open('docs/source/tutorial/auction-listing-after-procuringEntity.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions')
            self.assertEqual(response.status, '200 OK')
        self.app.authorization = ('Basic', ('broker', ''))
        # Modifying auction
        #
        tenderPeriod_endDate = get_now() + timedelta(days=15, seconds=10)
        with open('docs/source/tutorial/patch-items-value-periods.http', 'w') as self.app.file_obj:
            response = self.app.patch_json(
                '/auctions/{}?acc_token={}'.format(
                    auction['id'], owner_token), {
                    'data': {
                        "tenderPeriod": {
                            "endDate": tenderPeriod_endDate.isoformat()}}})
        self.app.get(request_path)
        with open('docs/source/tutorial/auction-listing-after-patch.http', 'w') as self.app.file_obj:
            self.app.authorization = None
            response = self.app.get(request_path)
            self.assertEqual(response.status, '200 OK')
        self.app.authorization = ('Basic', ('broker', ''))
        self.auction_id = auction['id']
        # After the rectification period ends, editing is forbidden (403).
        self.go_to_rectificationPeriod_end()
        data = test_auction_data.copy()
        with open('docs/source/tutorial/out-of-rectification-period-editing-denied.http', 'w') as self.app.file_obj:
            response = self.app.patch_json(
                '/auctions/{}?acc_token={}'.format(
                    auction['id'], owner_token), {
                    "data": data}, status=403)
            self.assertEqual(response.status, '403 Forbidden')
        # Uploading documentation
        #
        with open('docs/source/tutorial/upload-auction-notice.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions/{}/documents?acc_token={}'.format(
                    self.auction_id,
                    owner_token),
                {
                    'data': {
                        'title': u'Notice.pdf',
                        'url': self.generate_docservice_url(),
                        'hash': 'md5:' + '0' * 32,
                        'format': 'application/pdf',
                        "documentType": "technicalSpecifications",
                        "description": "document description",
                    }})
            self.assertEqual(response.status, '201 Created')
            doc_id = response.json["data"]["id"]
        with open('docs/source/tutorial/auction-documents.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions/{}/documents/{}'.format(
                self.auction_id, doc_id))
            self.assertEqual(response.status, '200 OK')
        with open('docs/source/tutorial/upload-award-criteria.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions/{}/documents?acc_token={}'.format(
                    self.auction_id,
                    owner_token),
                {
                    'data': {
                        'title': u'AwardCriteria.pdf',
                        'url': self.generate_docservice_url(),
                        'hash': 'md5:' + '0' * 32,
                        'format': 'application/pdf',
                    }})
            self.assertEqual(response.status, '201 Created')
            doc_id = response.json["data"]["id"]
        with open('docs/source/tutorial/auction-documents-2.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions/{}/documents'.format(
                self.auction_id))
            self.assertEqual(response.status, '200 OK')
        # PUT replaces the previously uploaded award-criteria document.
        with open('docs/source/tutorial/update-award-criteria.http', 'w') as self.app.file_obj:
            response = self.app.put_json(
                '/auctions/{}/documents/{}?acc_token={}'.format(
                    self.auction_id,
                    doc_id,
                    owner_token),
                {
                    'data': {
                        'title': u'AwardCriteria-2.pdf',
                        'url': self.generate_docservice_url(),
                        'hash': 'md5:' + '0' * 32,
                        'format': 'application/pdf',
                    }})
            self.assertEqual(response.status, '200 OK')
        with open('docs/source/tutorial/auction-documents-3.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions/{}/documents'.format(
                self.auction_id))
            self.assertEqual(response.status, '200 OK')
        with open('docs/source/tutorial/upload-first-auction-illustration.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions/{}/documents?acc_token={}'.format(
                    self.auction_id,
                    owner_token),
                {
                    'data': {
                        'title': u'first_illustration.jpeg',
                        'url': self.generate_docservice_url(),
                        'hash': 'md5:' + '0' * 32,
                        'format': 'image/jpeg',
                        "documentType": "illustration",
                        "description": "First illustration description",
                        "index": 1}})
            self.assertEqual(response.status, '201 Created')
        with open('docs/source/tutorial/auction-documents-4.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions/{}/documents'.format(
                self.auction_id))
            self.assertEqual(response.status, '200 OK')
        with open('docs/source/tutorial/upload-second-auction-illustration.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions/{}/documents?acc_token={}'.format(
                    self.auction_id,
                    owner_token),
                {
                    'data': {
                        'title': u'second_illustration.jpeg',
                        'url': self.generate_docservice_url(),
                        'hash': 'md5:' + '0' * 32,
                        'format': 'image/jpeg',
                        "documentType": "illustration",
                        "description": "Second illustration description",
                        "index": 2}})
            self.assertEqual(response.status, '201 Created')
        # NOTE(review): third illustration reuses index 2 — presumably to
        # demonstrate duplicate indexes; confirm before "fixing".
        with open('docs/source/tutorial/upload-third-auction-illustration.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions/{}/documents?acc_token={}'.format(
                    self.auction_id,
                    owner_token),
                {
                    'data': {
                        'title': u'third_illustration.jpeg',
                        'url': self.generate_docservice_url(),
                        'hash': 'md5:' + '0' * 32,
                        'format': 'image/jpeg',
                        "documentType": "illustration",
                        "description": "Third illustration description",
                        "index": 2}})
            self.assertEqual(response.status, '201 Created')
        with open('docs/source/tutorial/auction-documents-5.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions/{}/documents'.format(
                self.auction_id))
            self.assertEqual(response.status, '200 OK')
        with open('docs/source/tutorial/add-asset-familiarization-document.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions/{}/documents?acc_token={}'.format(
                    self.auction_id,
                    owner_token),
                {
                    'data': {
                        'title': u'Familiarization with bank asset',
                        "documentType": "x_dgfAssetFamiliarization",
                        'accessDetails': "Familiar with asset: days, time, address",
                    }})
            self.assertEqual(response.status, '201 Created')
        with open('docs/source/tutorial/auction-documents-6.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions/{}/documents'.format(
                self.auction_id))
            self.assertEqual(response.status, '200 OK')
        # Enquiries
        #
        with open('docs/source/tutorial/ask-question.http', 'w') as self.app.file_obj:
            response = self.app.post_json('/auctions/{}/questions'.format(
                self.auction_id), question, status=201)
            question_id = response.json['data']['id']
            self.assertEqual(response.status, '201 Created')
        with open('docs/source/tutorial/answer-question.http', 'w') as self.app.file_obj:
            response = self.app.patch_json(
                '/auctions/{}/questions/{}?acc_token={}'.format(
                    self.auction_id, question_id, owner_token), answer, status=200)
            self.assertEqual(response.status, '200 OK')
        with open('docs/source/tutorial/list-question.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions/{}/questions'.format(
                self.auction_id))
            self.assertEqual(response.status, '200 OK')
        with open('docs/source/tutorial/get-answer.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions/{}/questions/{}'.format(
                self.auction_id, question_id))
            self.assertEqual(response.status, '200 OK')
        # Registering bid
        #
        self.app.authorization = ('Basic', ('broker', ''))
        bids_access = {}
        with open('docs/source/tutorial/register-bidder.http', 'w') as self.app.file_obj:
            response = self.app.post_json('/auctions/{}/bids'.format(
                self.auction_id), bid)
            bid1_id = response.json['data']['id']
            bids_access[bid1_id] = response.json['access']['token']
            self.assertEqual(response.status, '201 Created')
        with open('docs/source/tutorial/activate-bidder.http', 'w') as self.app.file_obj:
            response = self.app.patch_json(
                '/auctions/{}/bids/{}?acc_token={}'.format(
                    self.auction_id, bid1_id, bids_access[bid1_id]), {
                    "data": {
                        "status": "active"}})
            self.assertEqual(response.status, '200 OK')
        # Proposal Uploading
        #
        with open('docs/source/tutorial/upload-bid-proposal.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions/{}/bids/{}/documents?acc_token={}'.format(
                    self.auction_id,
                    bid1_id,
                    bids_access[bid1_id]),
                {
                    'data': {
                        'title': u'Proposal.pdf',
                        'url': self.generate_docservice_url(),
                        'hash': 'md5:' + '0' * 32,
                        'format': 'application/pdf',
                    }})
            self.assertEqual(response.status, '201 Created')
        with open('docs/source/tutorial/bidder-documents.http', 'w') as self.app.file_obj:
            response = self.app.get(
                '/auctions/{}/bids/{}/documents?acc_token={}'.format(
                    self.auction_id, bid1_id, bids_access[bid1_id]))
            self.assertEqual(response.status, '200 OK')
        # Second bidder registration
        #
        with open('docs/source/tutorial/register-2nd-bidder.http', 'w') as self.app.file_obj:
            response = self.app.post_json('/auctions/{}/bids'.format(
                self.auction_id), bid2)
            bid2_id = response.json['data']['id']
            bids_access[bid2_id] = response.json['access']['token']
            self.assertEqual(response.status, '201 Created')
        # Auction
        #
        self.set_status('active.auction')
        # The auction-service user posts the auction and participation URLs.
        self.app.authorization = ('Basic', ('auction', ''))
        patch_data = {
            'auctionUrl': u'http://auction-sandbox.openprocurement.org/auctions/{}'.format(self.auction_id),
            'bids': [
                {
                    "id": bid1_id,
                    "participationUrl": u'http://auction-sandbox.openprocurement.org/auctions/{}?key_for_bid={}'.format(self.auction_id, bid1_id)
                },
                {
                    "id": bid2_id,
                    "participationUrl": u'http://auction-sandbox.openprocurement.org/auctions/{}?key_for_bid={}'.format(self.auction_id, bid2_id)
                }
            ]
        }
        response = self.app.patch_json(
            '/auctions/{}/auction?acc_token={}'.format(
                self.auction_id, owner_token), {
                'data': patch_data})
        self.assertEqual(response.status, '200 OK')
        self.app.authorization = ('Basic', ('broker', ''))
        with open('docs/source/tutorial/auction-url.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions/{}'.format(self.auction_id))
            self.assertEqual(response.status, '200 OK')
        with open('docs/source/tutorial/bidder-participation-url.http', 'w') as self.app.file_obj:
            response = self.app.get(
                '/auctions/{}/bids/{}?acc_token={}'.format(
                    self.auction_id, bid1_id, bids_access[bid1_id]))
            self.assertEqual(response.status, '200 OK')
        with open('docs/source/tutorial/bidder2-participation-url.http', 'w') as self.app.file_obj:
            response = self.app.get(
                '/auctions/{}/bids/{}?acc_token={}'.format(
                    self.auction_id, bid2_id, bids_access[bid2_id]))
            self.assertEqual(response.status, '200 OK')
        # Confirming qualification
        #
        self.app.authorization = ('Basic', ('auction', ''))
        response = self.app.get('/auctions/{}/auction'.format(self.auction_id))
        auction_bids_data = response.json['data']['bids']
        # Posting auction results triggers award creation.
        response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id),
                                      {'data': {'bids': auction_bids_data}})
        self.app.authorization = ('Basic', ('broker', ''))
        response = self.app.get('/auctions/{}/awards'.format(self.auction_id))
        with open('docs/source/tutorial/get-awards.http', 'w') as self.app.file_obj:
            response = self.app.get(
                '/auctions/{}/awards'.format(self.auction_id))
            self.assertEqual(response.status, '200 OK')
            self.assertEqual(len(response.json['data']), 2)
        # get waiting award
        award = [i for i in response.json['data']
                 if i['status'] == 'pending.waiting'][0]
        award_id = award['id']
        with open('docs/source/qualification/award-waiting-cancel.http', 'w') as self.app.file_obj:
            response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(
                self.auction_id, award_id, bids_access[award['bid_id']]), {"data": {"status": "cancelled"}})
            self.assertEqual(response.status, '200 OK')
        # get pending award
        response = self.app.get('/auctions/{}/awards'.format(self.auction_id))
        award_id = [i['id'] for i in response.json['data']
                    if i['status'] == 'pending.verification'][0]
        with open('docs/source/tutorial/bidder-auction-protocol.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions/{}/awards/{}/documents?acc_token={}'.format(
                    self.auction_id,
                    award_id,
                    bids_access[bid2_id]),
                {
                    'data': {
                        'title': u'SignedAuctionProtocol.pdf',
                        'url': self.generate_docservice_url(),
                        'hash': 'md5:' + '0' * 32,
                        'format': 'application/pdf',
                        "documentType": "auctionProtocol",
                    }})
            self.assertEqual(response.status, '201 Created')
        with open('docs/source/tutorial/owner-auction-protocol.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions/{}/awards/{}/documents?acc_token={}'.format(
                    self.auction_id,
                    award_id,
                    owner_token),
                {
                    'data': {
                        'title': u'SignedAuctionProtocol.pdf',
                        'url': self.generate_docservice_url(),
                        'hash': 'md5:' + '0' * 32,
                        'format': 'application/pdf',
                        "documentType": "auctionProtocol",
                    }})
            self.assertEqual(response.status, '201 Created')
        with open('docs/source/tutorial/verify-protocol.http', 'w') as self.app.file_obj:
            response = self.app.patch_json(
                '/auctions/{}/awards/{}?acc_token={}'.format(
                    self.auction_id, award_id, owner_token), {
                    "data": {
                        "status": "pending.payment"}})
            self.assertEqual(response.status, '200 OK')
        with open('docs/source/tutorial/confirm-qualification.http', 'w') as self.app.file_obj:
            response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(
                self.auction_id, award_id, owner_token), {"data": {"status": "active"}})
            self.assertEqual(response.status, '200 OK')
        response = self.app.get(
            '/auctions/{}/contracts'.format(self.auction_id))
        self.contract_id = response.json['data'][0]['id']
        # Set contract value
        # Close the complaint periods directly in the DB so the contract
        # can be signed without waiting.
        auction = self.db.get(self.auction_id)
        for i in auction.get('awards', []):
            i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
        self.db.save(auction)
        # Setting contract period
        period_dates = {
            "period": {
                "startDate": (now).isoformat(),
                "endDate": (
                    now +
                    timedelta(
                        days=365)).isoformat()}}
        with open('docs/source/tutorial/auction-contract-period.http', 'w') as self.app.file_obj:
            response = self.app.patch_json(
                '/auctions/{}/contracts/{}?acc_token={}'.format(
                    self.auction_id, self.contract_id, owner_token), {
                    'data': {
                        'period': period_dates["period"]}})
            self.assertEqual(response.status, '200 OK')
        # Uploading contract documentation
        #
        with open('docs/source/tutorial/auction-contract-upload-document.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions/{}/contracts/{}/documents?acc_token={}'.format(
                    self.auction_id,
                    self.contract_id,
                    owner_token),
                {
                    'data': {
                        'title': u'contract_first_document.doc',
                        'url': self.generate_docservice_url(),
                        'hash': 'md5:' + '0' * 32,
                        'format': 'application/msword',
                    }})
            self.assertEqual(response.status, '201 Created')
        with open('docs/source/tutorial/auction-contract-get-documents.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions/{}/contracts/{}/documents'.format(
                self.auction_id, self.contract_id))
            self.assertEqual(response.status, '200 OK')
        with open('docs/source/tutorial/auction-contract-upload-second-document.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions/{}/contracts/{}/documents?acc_token={}'.format(
                    self.auction_id,
                    self.contract_id,
                    owner_token),
                {
                    'data': {
                        'title': u'contract_second_document.doc',
                        'url': self.generate_docservice_url(),
                        'hash': 'md5:' + '0' * 32,
                        'format': 'application/msword',
                    }})
            self.assertEqual(response.status, '201 Created')
        with open('docs/source/tutorial/auction-contract-get-documents-again.http', 'w') as self.app.file_obj:
            response = self.app.get('/auctions/{}/contracts/{}/documents'.format(
                self.auction_id, self.contract_id))
            self.assertEqual(response.status, '200 OK')
        # Setting contract signature date and Contract signing
        #
        auction = self.db.get(self.auction_id)
        for i in auction.get('awards', []):
            i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
        self.db.save(auction)
        with open('docs/source/tutorial/auction-contract-sign.http', 'w') as self.app.file_obj:
            response = self.app.patch_json(
                '/auctions/{}/contracts/{}?acc_token={}'.format(
                    self.auction_id, self.contract_id, owner_token), {
                    'data': {
                        'status': 'active', "dateSigned": get_now().isoformat()}})
            self.assertEqual(response.status, '200 OK')
        # Preparing the cancellation request
        #
        self.set_status('active.awarded')
        with open('docs/source/tutorial/prepare-cancellation.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions/{}/cancellations?acc_token={}'.format(
                    self.auction_id, owner_token), cancellation)
            self.assertEqual(response.status, '201 Created')
            cancellation_id = response.json['data']['id']
        # Filling cancellation with protocol and supplementary documentation
        #
        with open('docs/source/tutorial/upload-cancellation-doc.http', 'w') as self.app.file_obj:
            response = self.app.post_json(
                '/auctions/{}/cancellations/{}/documents?acc_token={}'.format(
                    self.auction_id,
                    cancellation_id,
                    owner_token),
                {
                    'data': {
                        'title': u'Notice.pdf',
                        'url': self.generate_docservice_url(),
                        'hash': 'md5:' + '0' * 32,
                        'format': 'application/pdf',
                    }})
            cancellation_doc_id = response.json['data']['id']
            self.assertEqual(response.status, '201 Created')
        with open('docs/source/tutorial/patch-cancellation.http', 'w') as self.app.file_obj:
            response = self.app.patch_json(
                '/auctions/{}/cancellations/{}/documents/{}?acc_token={}'.format(
                    self.auction_id, cancellation_id, cancellation_doc_id, owner_token), {
                    'data': {
                        "description": 'Changed description'}})
            self.assertEqual(response.status, '200 OK')
        with open('docs/source/tutorial/update-cancellation-doc.http', 'w') as self.app.file_obj:
            response = self.app.put_json(
                '/auctions/{}/cancellations/{}/documents/{}?acc_token={}'.format(
                    self.auction_id,
                    cancellation_id,
                    cancellation_doc_id,
                    owner_token),
                {
                    'data': {
                        'title': u'Notice-2.pdf',
                        'url': self.generate_docservice_url(),
                        'hash': 'md5:' + '0' * 32,
                        'format': 'application/pdf',
                    }})
            self.assertEqual(response.status, '200 OK')
        # Activating the request and cancelling auction
        #
        with open('docs/source/tutorial/active-cancellation.http', 'w') as self.app.file_obj:
            response = self.app.patch_json(
                '/auctions/{}/cancellations/{}?acc_token={}'.format(
                    self.auction_id, cancellation_id, owner_token), {
                    "data": {
                        "status": "active"}})
            self.assertEqual(response.status, '200 OK')
    def test_docs_disqualification(self):
        """Dump the award-disqualification fixtures.

        Two bids are posted, the auction stage is run, the first
        'pending.verification' award is marked unsuccessful, and the next
        one is disqualified with a reason document — the last two
        exchanges are written to docs/source/qualification/*.http.
        """
        self.create_auction()
        # create bids
        self.set_status('active.tendering')
        self.app.authorization = ('Basic', ('broker', ''))
        response = self.app.post_json(
            '/auctions/{}/bids'.format(
                self.auction_id), {
                'data': {
                    "qualified": True, 'tenderers': [
                        bid["data"]["tenderers"][0]], "value": {
                        "amount": 450}}})
        self.initial_bids_tokens[response.json['data']
                                 ['id']] = response.json['access']['token']
        self.app.authorization = ('Basic', ('broker', ''))
        response = self.app.post_json(
            '/auctions/{}/bids'.format(
                self.auction_id), {
                'data': {
                    "qualified": True, 'tenderers': [
                        bid["data"]["tenderers"][0]], "value": {
                        "amount": 475}}})
        self.initial_bids_tokens[response.json['data']
                                 ['id']] = response.json['access']['token']
        # get auction info
        self.set_status('active.auction')
        self.app.authorization = ('Basic', ('auction', ''))
        response = self.app.get('/auctions/{}/auction'.format(self.auction_id))
        auction_bids_data = response.json['data']['bids']
        # posting auction urls
        response = self.app.patch_json('/auctions/{}/auction'.format(self.auction_id),
                                       {
                                           'data': {
                                               'auctionUrl': 'https://auction.auction.url',
                                               'bids': [
                                                   {
                                                       'id': i['id'],
                                                       'participationUrl': 'https://auction.auction.url/for_bid/{}'.format(i['id'])
                                                   }
                                                   for i in auction_bids_data
                                               ]
                                           }
                                       })
        # posting auction results
        self.app.authorization = ('Basic', ('auction', ''))
        response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id),
                                      {'data': {'bids': auction_bids_data}})
        # get awards
        self.app.authorization = ('Basic', ('broker', ''))
        response = self.app.get('/auctions/{}/awards'.format(self.auction_id))
        self.assertEqual(response.status, '200 OK')
        # First award pending verification: upload protocol, then reject it.
        award = [i for i in response.json['data']
                 if i['status'] == 'pending.verification'][0]
        award_id = award['id']
        bid_token = self.initial_bids_tokens[award['bid_id']]
        self.app.authorization = ('Basic', ('broker', ''))
        response = self.app.post_json('/auctions/{}/awards/{}/documents?acc_token={}'.format(
            self.auction_id, award_id, bid_token), {'data': {
                'title': u'auction_protocol.pdf',
                'url': self.generate_docservice_url(),
                'hash': 'md5:' + '0' * 32,
                'format': 'application/pdf',
                'documentType': 'auctionProtocol',
            }})
        self.assertEqual(response.status, '201 Created')
        response = self.app.post_json('/auctions/{}/awards/{}/documents?acc_token={}'.format(
            self.auction_id, award_id, self.auction_token), {'data': {
                'title': u'Unsuccessful_Reason.pdf',
                'url': self.generate_docservice_url(),
                'hash': 'md5:' + '0' * 32,
                'format': 'application/pdf',
            }})
        self.assertEqual(response.status, '201 Created')
        response = self.app.patch_json(
            '/auctions/{}/awards/{}?acc_token={}'.format(
                self.auction_id, award_id, self.auction_token), {
                "data": {
                    "status": "unsuccessful"}})
        self.assertEqual(response.status, '200 OK')
        # Next award becomes pending.verification; disqualify it and dump
        # the exchanges as documentation fixtures.
        response = self.app.get('/auctions/{}/awards'.format(self.auction_id))
        award = [i for i in response.json['data']
                 if i['status'] == 'pending.verification'][0]
        award_id2 = award['id']
        bid_token = self.initial_bids_tokens[award['bid_id']]
        self.app.authorization = ('Basic', ('broker', ''))
        response = self.app.post_json('/auctions/{}/awards/{}/documents?acc_token={}'.format(
            self.auction_id, award_id2, bid_token), {'data': {
                'title': u'auction_protocol.pdf',
                'url': self.generate_docservice_url(),
                'hash': 'md5:' + '0' * 32,
                'format': 'application/pdf',
                'documentType': 'auctionProtocol',
            }})
        self.assertEqual(response.status, '201 Created')
        with open('docs/source/qualification/award-active-unsuccessful-upload.http', 'w') as self.app.file_obj:
            response = self.app.post_json('/auctions/{}/awards/{}/documents?acc_token={}'.format(
                self.auction_id, award_id2, self.auction_token), {'data': {
                    'title': u'Disqualified_reason.pdf',
                    'url': self.generate_docservice_url(),
                    'hash': 'md5:' + '0' * 32,
                    'format': 'application/pdf',
                    "description": "Disqualification reason"
                }})
            self.assertEqual(response.status, '201 Created')
        with open('docs/source/qualification/award-active-disqualify.http', 'w') as self.app.file_obj:
            response = self.app.patch_json(
                '/auctions/{}/awards/{}?acc_token={}'.format(
                    self.auction_id,
                    award_id2,
                    self.auction_token),
                {
                    "data": {
                        "status": "unsuccessful",
                        "title": "Disqualified",
                        "description": "Candidate didn’t sign the auction protocol in 3 business days"}})
            self.assertEqual(response.status, '200 OK')
def _test_docs_complaints(self):
# ##################### Tender Conditions Claims/Complaints ############
#
# Claim Submission (with documents)
#
self.create_auction()
with open('docs/source/complaints/complaint-submission.http', 'w') as self.app.file_obj:
response = self.app.post_json('/auctions/{}/complaints'.format(
self.auction_id), test_complaint_data)
self.assertEqual(response.status, '201 Created')
complaint1_id = response.json['data']['id']
complaint1_token = response.json['access']['token']
with open('docs/source/complaints/complaint-submission-upload.http', 'w') as self.app.file_obj:
response = self.app.post_json('/auctions/{}/complaints/{}/documents?acc_token={}'.format(
self.auction_id, complaint1_id, complaint1_token), {'data': {
'title': u'Complaint_Attachement.pdf',
'url': self.generate_docservice_url(),
'hash': 'md5:' + '0' * 32,
'format': 'application/pdf',
}})
self.assertEqual(response.status, '201 Created')
with open('docs/source/complaints/complaint-claim.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/complaints/{}?acc_token={}'.format(
self.auction_id, complaint1_id, complaint1_token), {
"data": {
"status": "claim"}})
self.assertEqual(response.status, '200 OK')
# Claim Submission (without documents)
#
test_complaint_data['data']['status'] = 'claim'
with open('docs/source/complaints/complaint-submission-claim.http', 'w') as self.app.file_obj:
response = self.app.post_json('/auctions/{}/complaints'.format(
self.auction_id), test_complaint_data)
self.assertEqual(response.status, '201 Created')
complaint2_id = response.json['data']['id']
complaint2_token = response.json['access']['token']
# Tender Conditions Claim/Complaint Retrieval
#
with open('docs/source/complaints/complaints-list.http', 'w') as self.app.file_obj:
self.app.authorization = None
response = self.app.get(
'/auctions/{}/complaints'.format(self.auction_id))
self.assertEqual(response.status, '200 OK')
with open('docs/source/complaints/complaint.http', 'w') as self.app.file_obj:
response = self.app.get(
'/auctions/{}/complaints/{}'.format(self.auction_id, complaint1_id))
self.assertEqual(response.status, '200 OK')
self.app.authorization = ('Basic', ('broker', ''))
# Claim's Answer
#
with open('docs/source/complaints/complaint-answer.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/complaints/{}?acc_token={}'.format(
self.auction_id,
complaint1_id,
self.auction_token),
{
"data": {
"status": "answered",
"resolutionType": "resolved",
"tendererAction": "Виправлено неконкурентні умови",
"resolution": "Виправлено неконкурентні умови"}})
self.assertEqual(response.status, '200 OK')
# Satisfied Claim
#
with open('docs/source/complaints/complaint-satisfy.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/complaints/{}?acc_token={}'.format(
self.auction_id, complaint1_id, complaint1_token), {
"data": {
"status": "resolved", "satisfied": True}})
self.assertEqual(response.status, '200 OK')
# Satisfied Claim
#
response = self.app.patch_json(
'/auctions/{}/complaints/{}?acc_token={}'.format(
self.auction_id,
complaint2_id,
self.auction_token),
{
"data": {
"status": "answered",
"resolutionType": "resolved",
"resolution": "Виправлено неконкурентні умови"}})
self.assertEqual(response.status, '200 OK')
with open('docs/source/complaints/complaint-escalate.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/complaints/{}?acc_token={}'.format(
self.auction_id, complaint2_id, complaint2_token), {
"data": {
"status": "pending", "satisfied": False}})
self.assertEqual(response.status, '200 OK')
# Rejecting Tender Conditions Complaint
#
self.app.authorization = ('Basic', ('reviewer', ''))
with open('docs/source/complaints/complaint-reject.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/complaints/{}'.format(
self.auction_id, complaint2_id), {
"data": {
"status": "invalid"}})
self.assertEqual(response.status, '200 OK')
# Submitting Tender Conditions Complaint Resolution
#
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.post_json('/auctions/{}/complaints'.format(
self.auction_id), test_complaint_data)
self.assertEqual(response.status, '201 Created')
complaint3_id = response.json['data']['id']
complaint3_token = response.json['access']['token']
self.app.patch_json(
'/auctions/{}/complaints/{}?acc_token={}'.format(
self.auction_id,
complaint3_id,
self.auction_token),
{
"data": {
"status": "answered",
"resolutionType": "resolved",
"resolution": "Виправлено неконкурентні умови"}})
self.app.patch_json(
'/auctions/{}/complaints/{}?acc_token={}'.format(
self.auction_id, complaint3_id, complaint3_token), {
"data": {
"status": "pending", "satisfied": False}})
response = self.app.post_json('/auctions/{}/complaints'.format(
self.auction_id), test_complaint_data)
self.assertEqual(response.status, '201 Created')
del test_complaint_data['data']['status']
complaint4_id = response.json['data']['id']
complaint4_token = response.json['access']['token']
self.app.patch_json(
'/auctions/{}/complaints/{}?acc_token={}'.format(
self.auction_id,
complaint4_id,
self.auction_token),
{
"data": {
"status": "answered",
"resolutionType": "resolved",
"resolution": "Виправлено неконкурентні умови"}})
self.app.patch_json(
'/auctions/{}/complaints/{}?acc_token={}'.format(
self.auction_id, complaint4_id, complaint4_token), {
"data": {
"status": "pending", "satisfied": False}})
self.app.authorization = ('Basic', ('reviewer', ''))
with open('docs/source/complaints/complaint-resolution-upload.http', 'w') as self.app.file_obj:
response = self.app.post_json('/auctions/{}/complaints/{}/documents'.format(
self.auction_id, complaint3_id), {'data': {
'title': u'ComplaintResolution.pdf',
'url': self.generate_docservice_url(),
'hash': 'md5:' + '0' * 32,
'format': 'application/pdf',
}})
self.assertEqual(response.status, '201 Created')
with open('docs/source/complaints/complaint-resolve.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/complaints/{}'.format(
self.auction_id, complaint3_id), {
"data": {
"status": "resolved"}})
self.assertEqual(response.status, '200 OK')
with open('docs/source/complaints/complaint-decline.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/complaints/{}'.format(
self.auction_id, complaint4_id), {
"data": {
"status": "declined"}})
self.assertEqual(response.status, '200 OK')
# create bids
self.set_status('active.tendering')
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.post_json(
'/auctions/{}/bids'.format(
self.auction_id), {
'data': {
"qualified": True, 'tenderers': [
bid["data"]["tenderers"][0]], "value": {
"amount": 450}}})
bid_token = response.json['access']['token']
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.post_json(
'/auctions/{}/bids'.format(
self.auction_id), {
'data': {
"qualified": True, 'tenderers': [
bid["data"]["tenderers"][0]], "value": {
"amount": 475}}})
# get auction info
self.set_status('active.auction')
self.app.authorization = ('Basic', ('auction', ''))
response = self.app.get('/auctions/{}/auction'.format(self.auction_id))
auction_bids_data = response.json['data']['bids']
# posting auction urls
response = self.app.patch_json('/auctions/{}/auction'.format(self.auction_id),
{
'data': {
'auctionUrl': 'https://auction.auction.url',
'bids': [
{
'id': i['id'],
'participationUrl': 'https://auction.auction.url/for_bid/{}'.format(i['id'])
}
for i in auction_bids_data
]
}
})
# posting auction results
self.app.authorization = ('Basic', ('auction', ''))
response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id),
{'data': {'bids': auction_bids_data}})
# get awards
self.app.authorization = ('Basic', ('broker', ''))
with open('docs/source/qualification/awards-get.http', 'w') as self.app.file_obj:
response = self.app.get(
'/auctions/{}/awards'.format(self.auction_id))
self.assertEqual(response.status, '200 OK')
award_id = [i['id']
for i in response.json['data'] if i['status'] == 'pending'][0]
with open('docs/source/qualification/award-pending-upload.http', 'w') as self.app.file_obj:
response = self.app.post_json('/auctions/{}/awards/{}/documents?acc_token={}'.format(
self.auction_id, award_id, self.auction_token), {'data': {
'title': u'Unsuccessful_Reason.pdf',
'url': self.generate_docservice_url(),
'hash': 'md5:' + '0' * 32,
'format': 'application/pdf',
}})
self.assertEqual(response.status, '201 Created')
with open('docs/source/qualification/award-pending-unsuccessful.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/awards/{}?acc_token={}'.format(
self.auction_id, award_id, self.auction_token), {
"data": {
"status": "unsuccessful"}})
self.assertEqual(response.status, '200 OK')
response = self.app.get('/auctions/{}/awards'.format(self.auction_id))
award_id2 = [i['id']
for i in response.json['data'] if i['status'] == 'pending'][0]
with open('docs/source/qualification/award-pending-active.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/awards/{}?acc_token={}'.format(
self.auction_id, award_id2, self.auction_token), {
"data": {
"status": "active"}})
self.assertEqual(response.status, '200 OK')
with open('docs/source/qualification/award-active-get.http', 'w') as self.app.file_obj:
response = self.app.get('/auctions/{}/awards/{}'.format(
self.auction_id, award_id2))
self.assertEqual(response.status, '200 OK')
with open('docs/source/qualification/award-active-cancel.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/awards/{}?acc_token={}'.format(
self.auction_id, award_id2, self.auction_token), {
"data": {
"status": "cancelled"}})
self.assertEqual(response.status, '200 OK')
response = self.app.get(
'/auctions/{}/awards?acc_token={}'.format(self.auction_id, self.auction_token))
award_id3 = [i['id']
for i in response.json['data'] if i['status'] == 'pending'][0]
with open('docs/source/qualification/award-active-cancel-upload.http', 'w') as self.app.file_obj:
response = self.app.post_json('/auctions/{}/awards/{}/documents?acc_token={}'.format(
self.auction_id, award_id3, self.auction_token), {'data': {
'title': u'Disqualified_reason.pdf',
'url': self.generate_docservice_url(),
'hash': 'md5:' + '0' * 32,
'format': 'application/pdf',
"documentType": "notice",
"description": "Disqualified reason"
}})
self.assertEqual(response.status, '201 Created')
with open('docs/source/qualification/award-active-cancel-disqualify.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/awards/{}?acc_token={}'.format(
self.auction_id,
award_id3,
self.auction_token),
{
"data": {
"status": "unsuccessful",
"title": "Disqualified",
"description": "Candidate didn’t sign the auction protocol in 3 business days"}})
self.assertEqual(response.status, '200 OK')
# ##################### Tender Award Claims/Complaints #################
#
# Tender Award Claim Submission (with documents)
#
with open('docs/source/complaints/award-complaint-submission.http', 'w') as self.app.file_obj:
response = self.app.post_json(
'/auctions/{}/awards/{}/complaints?acc_token={}'.format(
self.auction_id, award_id, bid_token), test_complaint_data)
self.assertEqual(response.status, '201 Created')
complaint1_id = response.json['data']['id']
complaint1_token = response.json['access']['token']
with open('docs/source/complaints/award-complaint-submission-upload.http', 'w') as self.app.file_obj:
response = self.app.post_json(
'/auctions/{}/awards/{}/complaints/{}/documents?acc_token={}'.format(
self.auction_id,
award_id,
complaint1_id,
complaint1_token),
{
'data': {
'title': u'Complaint_Attachement.pdf',
'url': self.generate_docservice_url(),
'hash': 'md5:' + '0' * 32,
'format': 'application/pdf',
}})
self.assertEqual(response.status, '201 Created')
with open('docs/source/complaints/award-complaint-claim.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/awards/{}/complaints/{}?acc_token={}'.format(
self.auction_id, award_id, complaint1_id, complaint1_token), {
"data": {
"status": "claim"}})
self.assertEqual(response.status, '200 OK')
# Tender Award Claim Submission (without documents)
#
test_complaint_data['data']['status'] = 'claim'
with open('docs/source/complaints/award-complaint-submission-claim.http', 'w') as self.app.file_obj:
response = self.app.post_json(
'/auctions/{}/awards/{}/complaints?acc_token={}'.format(
self.auction_id, award_id, bid_token), test_complaint_data)
self.assertEqual(response.status, '201 Created')
complaint2_id = response.json['data']['id']
complaint2_token = response.json['access']['token']
# Tender Award Claim/Complaint Retrieval
#
with open('docs/source/complaints/award-complaints-list.http', 'w') as self.app.file_obj:
self.app.authorization = None
response = self.app.get(
'/auctions/{}/awards/{}/complaints'.format(self.auction_id, award_id,))
self.assertEqual(response.status, '200 OK')
with open('docs/source/complaints/award-complaint.http', 'w') as self.app.file_obj:
response = self.app.get(
'/auctions/{}/awards/{}/complaints/{}'.format(self.auction_id, award_id, complaint1_id))
self.assertEqual(response.status, '200 OK')
self.app.authorization = ('Basic', ('broker', ''))
# Claim's Answer
#
with open('docs/source/complaints/award-complaint-answer.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/awards/{}/complaints/{}?acc_token={}'.format(
self.auction_id,
award_id,
complaint1_id,
self.auction_token),
{
"data": {
"status": "answered",
"resolutionType": "resolved",
"tendererAction": "Виправлено неконкурентні умови",
"resolution": "Виправлено неконкурентні умови"}})
self.assertEqual(response.status, '200 OK')
# Satisfied Claim
#
with open('docs/source/complaints/award-complaint-satisfy.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/awards/{}/complaints/{}?acc_token={}'.format(
self.auction_id, award_id, complaint1_id, complaint1_token), {
"data": {
"status": "resolved", "satisfied": True}})
self.assertEqual(response.status, '200 OK')
# Satisfied Claim
#
response = self.app.patch_json(
'/auctions/{}/awards/{}/complaints/{}?acc_token={}'.format(
self.auction_id,
award_id,
complaint2_id,
self.auction_token),
{
"data": {
"status": "answered",
"resolutionType": "resolved",
"resolution": "Виправлено неконкурентні умови"}})
self.assertEqual(response.status, '200 OK')
with open('docs/source/complaints/award-complaint-escalate.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/awards/{}/complaints/{}?acc_token={}'.format(
self.auction_id, award_id, complaint2_id, complaint2_token), {
"data": {
"status": "pending", "satisfied": False}})
self.assertEqual(response.status, '200 OK')
# Rejecting Tender Award Complaint
#
self.app.authorization = ('Basic', ('reviewer', ''))
with open('docs/source/complaints/award-complaint-reject.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/auctions/{}/awards/{}/complaints/{}'.format(
self.auction_id, award_id, complaint2_id), {"data": {"status": "invalid"}})
self.assertEqual(response.status, '200 OK')
# Submitting Tender Award Complaint Resolution
#
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.post_json(
'/auctions/{}/awards/{}/complaints?acc_token={}'.format(
self.auction_id, award_id, bid_token), test_complaint_data)
self.assertEqual(response.status, '201 Created')
complaint3_id = response.json['data']['id']
complaint3_token = response.json['access']['token']
self.app.patch_json(
'/auctions/{}/awards/{}/complaints/{}?acc_token={}'.format(
self.auction_id,
award_id,
complaint3_id,
self.auction_token),
{
"data": {
"status": "answered",
"resolutionType": "resolved",
"resolution": "Виправлено неконкурентні умови"}})
self.app.patch_json(
'/auctions/{}/awards/{}/complaints/{}?acc_token={}'.format(
self.auction_id, award_id, complaint3_id, complaint3_token), {
"data": {
"status": "pending", "satisfied": False}})
response = self.app.post_json(
'/auctions/{}/awards/{}/complaints?acc_token={}'.format(
self.auction_id, award_id, bid_token), test_complaint_data)
self.assertEqual(response.status, '201 Created')
complaint4_id = response.json['data']['id']
complaint4_token = response.json['access']['token']
self.app.patch_json(
'/auctions/{}/awards/{}/complaints/{}?acc_token={}'.format(
self.auction_id,
award_id,
complaint4_id,
self.auction_token),
{
"data": {
"status": "answered",
"resolutionType": "resolved",
"resolution": "Виправлено неконкурентні умови"}})
self.app.patch_json(
'/auctions/{}/awards/{}/complaints/{}?acc_token={}'.format(
self.auction_id, award_id, complaint4_id, complaint4_token), {
"data": {
"status": "pending", "satisfied": False}})
self.app.authorization = ('Basic', ('reviewer', ''))
with open('docs/source/complaints/award-complaint-resolution-upload.http', 'w') as self.app.file_obj:
response = self.app.post_json('/auctions/{}/awards/{}/complaints/{}/documents'.format(
self.auction_id, award_id, complaint3_id), {'data': {
'title': u'ComplaintResolution.pdf',
'url': self.generate_docservice_url(),
'hash': 'md5:' + '0' * 32,
'format': 'application/pdf',
}})
self.assertEqual(response.status, '201 Created')
with open('docs/source/complaints/award-complaint-resolve.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/auctions/{}/awards/{}/complaints/{}'.format(
self.auction_id, award_id, complaint3_id), {"data": {"status": "resolved"}})
self.assertEqual(response.status, '200 OK')
with open('docs/source/complaints/award-complaint-decline.http', 'w') as self.app.file_obj:
response = self.app.patch_json('/auctions/{}/awards/{}/complaints/{}'.format(
self.auction_id, award_id, complaint4_id), {"data": {"status": "declined"}})
self.assertEqual(response.status, '200 OK')
self.app.authorization = ('Basic', ('broker', ''))
with open('docs/source/qualification/awards-unsuccessful-get1.http', 'w') as self.app.file_obj:
response = self.app.get('/auctions/{}/awards'.format(
self.auction_id, award_id))
self.assertEqual(response.status, '200 OK')
with open('docs/source/qualification/award-unsuccessful-cancel.http', 'w') as self.app.file_obj:
response = self.app.patch_json(
'/auctions/{}/awards/{}?acc_token={}'.format(
self.auction_id, award_id, self.auction_token), {
"data": {
"status": "cancelled"}})
self.assertEqual(response.status, '200 OK')
with open('docs/source/qualification/awards-unsuccessful-get2.http', 'w') as self.app.file_obj:
response = self.app.get('/auctions/{}/awards'.format(
self.auction_id, award_id))
self.assertEqual(response.status, '200 OK')
| 44.522398 | 145 | 0.517289 |
9e118a0f9fc03cff8cca5ff5cc6487e2ef41c93a | 9,218 | py | Python | sdks/python/apache_beam/coders/standard_coders_test.py | jxub/beam | 8222fcc978a54d98d385c108fb5fcf7615d74829 | [
"Apache-2.0"
] | 35 | 2016-09-22T22:53:14.000Z | 2020-02-13T15:12:21.000Z | sdks/python/apache_beam/coders/standard_coders_test.py | jxub/beam | 8222fcc978a54d98d385c108fb5fcf7615d74829 | [
"Apache-2.0"
] | 71 | 2018-05-23T22:20:02.000Z | 2019-04-30T15:37:46.000Z | sdks/python/apache_beam/coders/standard_coders_test.py | jxub/beam | 8222fcc978a54d98d385c108fb5fcf7615d74829 | [
"Apache-2.0"
] | 88 | 2016-11-27T02:16:11.000Z | 2020-02-28T05:10:26.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for coders that must be consistent across all Beam SDKs.
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import math
import os.path
import sys
import unittest
from builtins import map
from typing import Dict
from typing import Tuple
import yaml
from apache_beam.coders import coder_impl
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import schema_pb2
from apache_beam.runners import pipeline_context
from apache_beam.transforms import window
from apache_beam.transforms.window import IntervalWindow
from apache_beam.typehints import schemas
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import PaneInfo
from apache_beam.utils.windowed_value import PaneInfoTiming
# Absolute, normalized path to the cross-SDK coder test specification that
# ships alongside the portability protos.
STANDARD_CODERS_YAML = os.path.normpath(
    os.path.join(
        os.path.dirname(__file__), '../portability/api/standard_coders.yaml'))
def _load_test_cases(test_yaml):
  """Load test data from yaml file and return an iterable of test cases.

  Yields ``[name, spec]`` pairs, where ``name`` defaults to the
  second-to-last segment of the coder URN when no explicit name is given,
  and ``spec['index']`` records the position of the YAML document.

  See ``standard_coders.yaml`` for more details.
  """
  if not os.path.exists(test_yaml):
    raise ValueError('Could not find the test spec: %s' % test_yaml)
  with open(test_yaml, 'rb') as coder_spec:
    # yaml.load_all without an explicit Loader is deprecated and, with the
    # full loader, can construct arbitrary Python objects. The coder specs
    # are plain data, so the SafeLoader is sufficient (and safe).
    for ix, spec in enumerate(
        yaml.load_all(coder_spec, Loader=yaml.SafeLoader)):
      spec['index'] = ix
      name = spec.get('name', spec['coder']['urn'].split(':')[-2])
      yield [name, spec]
def parse_float(s):
  """Parse *s* as a float, normalizing NaN to a positive sign.

  On Windows, ``float('NaN')`` carries the opposite sign bit compared to
  other platforms; these tests only require a consistent representation.
  """
  value = float(s)
  return abs(value) if math.isnan(value) else value
def value_parser_from_schema(schema):
  """Build a parser converting a JSON dict into the schema's named tuple.

  Attributes present in the input but absent from the schema raise a
  ValueError; attributes missing from the input raise a KeyError
  (via ``dict.pop``).
  """
  def attribute_parser_from_type(type_):
    """Return a callable converting a JSON value to the given field type."""
    # TODO: This should be exhaustive
    type_info = type_.WhichOneof("type_info")
    if type_info == "atomic_type":
      return schemas.ATOMIC_TYPE_TO_PRIMITIVE[type_.atomic_type]
    elif type_info == "array_type":
      element_parser = attribute_parser_from_type(type_.array_type.element_type)
      return lambda x: list(map(element_parser, x))
    elif type_info == "map_type":
      # Fixed: the key/value types of a map live on ``map_type``; the
      # previous code read them from ``array_type``, which is empty for a
      # map field and would fail on the unset oneof.
      key_parser = attribute_parser_from_type(type_.map_type.key_type)
      value_parser = attribute_parser_from_type(type_.map_type.value_type)
      return lambda x: dict(
          (key_parser(k), value_parser(v)) for k, v in x.items())
  parsers = [(field.name, attribute_parser_from_type(field.type))
             for field in schema.fields]
  constructor = schemas.named_tuple_from_schema(schema)
  def value_parser(x):
    """Parse one JSON object into an instance of the schema's named tuple."""
    result = []
    for name, parser in parsers:
      value = x.pop(name)
      result.append(None if value is None else parser(value))
    if len(x):
      raise ValueError(
          "Test data contains attributes that don't exist in the schema: {}".
          format(', '.join(x.keys())))
    return constructor(*result)
  return value_parser
class StandardCodersTest(unittest.TestCase):
  """Checks that coders agree with the language-agnostic examples in
  ``standard_coders.yaml``: each example's JSON value must round-trip
  through the coder to exactly the expected byte string."""
  # Maps a coder URN to a callable that turns the JSON form of an example
  # value (from the YAML spec) into the Python value the coder handles.
  # Parsers for component coders are passed as extra positional arguments.
  _urn_to_json_value_parser = {
      'beam:coder:bytes:v1': lambda x: x.encode('utf-8'),
      'beam:coder:bool:v1': lambda x: x,
      'beam:coder:string_utf8:v1': lambda x: x,
      'beam:coder:varint:v1': lambda x: x,
      'beam:coder:kv:v1': lambda x,
      key_parser,
      value_parser: (key_parser(x['key']), value_parser(x['value'])),
      'beam:coder:interval_window:v1': lambda x: IntervalWindow(
          start=Timestamp(micros=(x['end'] - x['span']) * 1000),
          end=Timestamp(micros=x['end'] * 1000)),
      'beam:coder:iterable:v1': lambda x,
      parser: list(map(parser, x)),
      'beam:coder:global_window:v1': lambda x: window.GlobalWindow(),
      'beam:coder:windowed_value:v1': lambda x,
      value_parser,
      window_parser: windowed_value.create(
          value_parser(x['value']),
          x['timestamp'] * 1000,
          tuple([window_parser(w) for w in x['windows']])),
      'beam:coder:param_windowed_value:v1': lambda x,
      value_parser,
      window_parser: windowed_value.create(
          value_parser(x['value']),
          x['timestamp'] * 1000,
          tuple([window_parser(w) for w in x['windows']]),
          PaneInfo(
              x['pane']['is_first'],
              x['pane']['is_last'],
              PaneInfoTiming.from_string(x['pane']['timing']),
              x['pane']['index'],
              x['pane']['on_time_index'])),
      'beam:coder:timer:v1': lambda x,
      payload_parser: dict(
          payload=payload_parser(x['payload']),
          timestamp=Timestamp(micros=x['timestamp'] * 1000)),
      'beam:coder:double:v1': parse_float,
  }
  def test_standard_coders(self):
    """Run every test case defined in standard_coders.yaml."""
    for name, spec in _load_test_cases(STANDARD_CODERS_YAML):
      logging.info('Executing %s test.', name)
      self._run_standard_coder(name, spec)
  def _run_standard_coder(self, name, spec):
    """Check one spec: encode must match the expected bytes exactly and
    decoding those bytes must give back the parsed value."""
    def assert_equal(actual, expected):
      """Handle nan values which self.assertEqual fails on."""
      if (isinstance(actual, float) and isinstance(expected, float) and
          math.isnan(actual) and math.isnan(expected)):
        return
      self.assertEqual(actual, expected)
    coder = self.parse_coder(spec['coder'])
    parse_value = self.json_value_parser(spec['coder'])
    # Unless the spec pins a context, exercise both nested and unnested.
    nested_list = [spec['nested']] if 'nested' in spec else [True, False]
    for nested in nested_list:
      for expected_encoded, json_value in spec['examples'].items():
        value = parse_value(json_value)
        expected_encoded = expected_encoded.encode('latin1')
        if not spec['coder'].get('non_deterministic', False):
          actual_encoded = encode_nested(coder, value, nested)
          if self.fix and actual_encoded != expected_encoded:
            # In --fix mode, collect mismatches for tearDownClass to
            # rewrite into the YAML instead of failing.
            self.to_fix[spec['index'], expected_encoded] = actual_encoded
          else:
            self.assertEqual(expected_encoded, actual_encoded)
            decoded = decode_nested(coder, expected_encoded, nested)
            assert_equal(decoded, value)
        else:
          # Only verify decoding for a non-deterministic coder
          self.assertEqual(
              decode_nested(coder, expected_encoded, nested), value)
  def parse_coder(self, spec):
    """Build a coder instance from its spec (urn/payload/components),
    recursively registering component coders in a fresh pipeline context."""
    context = pipeline_context.PipelineContext()
    coder_id = str(hash(str(spec)))
    component_ids = [
        context.coders.get_id(self.parse_coder(c))
        for c in spec.get('components', ())
    ]
    context.coders.put_proto(
        coder_id,
        beam_runner_api_pb2.Coder(
            spec=beam_runner_api_pb2.FunctionSpec(
                urn=spec['urn'],
                payload=spec.get('payload', '').encode('latin1')),
            component_coder_ids=component_ids))
    return context.coders.get_by_id(coder_id)
  def json_value_parser(self, coder_spec):
    """Return the JSON-to-value parser for a coder spec, wiring in the
    parsers for any component coders."""
    # TODO: integrate this with the logic for the other parsers
    if coder_spec['urn'] == 'beam:coder:row:v1':
      # Row coders carry their schema in the payload; derive the parser
      # from the schema instead of the static table.
      schema = schema_pb2.Schema.FromString(
          coder_spec['payload'].encode('latin1'))
      return value_parser_from_schema(schema)
    component_parsers = [
        self.json_value_parser(c) for c in coder_spec.get('components', ())
    ]
    return lambda x: self._urn_to_json_value_parser[coder_spec['urn']](
        x, *component_parsers)
  # Used when --fix is passed.
  fix = False
  to_fix = {}  # type: Dict[Tuple[int, bytes], bytes]
  @classmethod
  def tearDownClass(cls):
    """In --fix mode, rewrite standard_coders.yaml so the expected encodings
    match what this SDK actually produced."""
    if cls.fix and cls.to_fix:
      print("FIXING", len(cls.to_fix), "TESTS")
      # YAML documents in the spec file are separated by '---' markers.
      doc_sep = '\n---\n'
      docs = open(STANDARD_CODERS_YAML).read().split(doc_sep)
      def quote(s):
        return json.dumps(s.decode('latin1')).replace(r'\u0000', r'\0')
      for (doc_ix, expected_encoded), actual_encoded in cls.to_fix.items():
        print(quote(expected_encoded), "->", quote(actual_encoded))
        docs[doc_ix] = docs[doc_ix].replace(
            quote(expected_encoded) + ':', quote(actual_encoded) + ':')
      open(STANDARD_CODERS_YAML, 'w').write(doc_sep.join(docs))
def encode_nested(coder, value, nested=True):
  """Encode *value* with *coder* and return the resulting bytes.

  ``nested`` selects the nested (inner) encoding context.
  """
  stream = coder_impl.create_OutputStream()
  impl = coder.get_impl()
  impl.encode_to_stream(value, stream, nested)
  return stream.get()
def decode_nested(coder, encoded, nested=True):
  """Decode *encoded* bytes with *coder* in the given encoding context."""
  in_stream = coder_impl.create_InputStream(encoded)
  impl = coder.get_impl()
  return impl.decode_from_stream(in_stream, nested)
if __name__ == '__main__':
  # Passing --fix makes StandardCodersTest rewrite standard_coders.yaml with
  # the actual encodings (see tearDownClass) instead of failing mismatches.
  # The flag is stripped so unittest does not see an unknown argument.
  if '--fix' in sys.argv:
    StandardCodersTest.fix = True
    sys.argv.remove('--fix')
  unittest.main()
| 36.291339 | 80 | 0.681384 |
b7689c1017c3abeac14a53884ae5c3439e0dde37 | 294 | py | Python | houdini/houdini_server/apps.py | TrianglePlusPlus/houdini | 292b1fb395fc34dbefa8f891cc94bb811f5805bb | [
"MIT"
] | 2 | 2017-09-25T00:30:22.000Z | 2021-02-04T22:11:54.000Z | houdini/houdini_server/apps.py | TrianglePlusPlus/houdini | 292b1fb395fc34dbefa8f891cc94bb811f5805bb | [
"MIT"
] | 11 | 2016-12-29T22:05:57.000Z | 2020-06-05T17:23:10.000Z | houdini/houdini_server/apps.py | TrianglePlusPlus/houdini | 292b1fb395fc34dbefa8f891cc94bb811f5805bb | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class HoudiniServerConfig(AppConfig):
    """Django application configuration for the ``houdini_server`` app."""

    name = 'houdini_server'
    verbose_name = 'Houdini Server'

    def ready(self):
        """Connect the signal receivers defined in ``signals.py``.

        The import is performed for its side effects only (registering the
        receivers), once the app registry is fully populated.
        """
        import houdini_server.signals  # noqa: F401
4f1cfe270815971dba33c2b46e4db2539a50a9ca | 54,460 | py | Python | packages/vaex-core/vaex/expression.py | yohplala/vaex | ca7927a19d259576ca0403ee207a597aaef6adc2 | [
"MIT"
] | null | null | null | packages/vaex-core/vaex/expression.py | yohplala/vaex | ca7927a19d259576ca0403ee207a597aaef6adc2 | [
"MIT"
] | null | null | null | packages/vaex-core/vaex/expression.py | yohplala/vaex | ca7927a19d259576ca0403ee207a597aaef6adc2 | [
"MIT"
] | null | null | null | import ast
import os
import base64
import cloudpickle as pickle
import functools
import operator
import six
import collections
import weakref
from future.utils import with_metaclass
import numpy as np
import tabulate
import pyarrow as pa
from vaex.datatype import DataType
from vaex.docstrings import docsubst
from vaex.utils import _ensure_strings_from_expressions, _ensure_string_from_expression
from vaex.column import ColumnString, _to_string_sequence
from .hash import counter_type_from_dtype
import vaex.serialize
from . import expresso
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
try:
collectionsAbc = collections.abc
except AttributeError:
collectionsAbc = collections
# TODO: repeated from dataframe.py
# Default grid shape used by binning/plotting helpers.
default_shape = 128
# Maximum number of rows shown when printing an expression.
PRINT_MAX_COUNT = 10
expression_namespace = {}
expression_namespace['nan'] = np.nan
# NOTE(review): the two assignments below duplicate the two above verbatim --
# likely a leftover from a merge; harmless but redundant. TODO: confirm and
# remove one pair.
expression_namespace = {}
expression_namespace['nan'] = np.nan
# Table of binary operators for which the Meta metaclass generates
# __<name>__ methods on Expression: `code` is the textual operator spliced
# into the expression string, `op` the equivalent Python operator.
_binary_ops = [
    dict(code="+", name='add', op=operator.add),
    dict(code="in", name='contains', op=operator.contains),
    dict(code="/", name='truediv', op=operator.truediv),
    dict(code="//", name='floordiv', op=operator.floordiv),
    dict(code="&", name='and', op=operator.and_),
    dict(code="^", name='xor', op=operator.xor),
    dict(code="|", name='or', op=operator.or_),
    dict(code="**", name='pow', op=operator.pow),
    dict(code="is", name='is', op=operator.is_),
    dict(code="is not", name='is_not', op=operator.is_not),
    dict(code="<<", name='lshift', op=operator.lshift),
    dict(code="%", name='mod', op=operator.mod),
    dict(code="*", name='mul', op=operator.mul),
    dict(code=">>", name='rshift', op=operator.rshift),
    dict(code="-", name='sub', op=operator.sub),
    dict(code="<", name='lt', op=operator.lt),
    dict(code="<=", name='le', op=operator.le),
    dict(code="==", name='eq', op=operator.eq),
    dict(code="!=", name='ne', op=operator.ne),
    dict(code=">=", name='ge', op=operator.ge),
    dict(code=">", name='gt', op=operator.gt),
]
# operator.div exists on Python 2 only; operator.matmul from Python 3.5.
if hasattr(operator, 'div'):
    _binary_ops.append(dict(code="/", name='div', op=operator.div))
if hasattr(operator, 'matmul'):
    _binary_ops.append(dict(code="@", name='matmul', op=operator.matmul))
# Operators that also get a reflected (__r<name>__) variant. (sic: the
# historical name "reversable" is kept for compatibility.)
reversable = 'add sub mul matmul truediv floordiv mod divmod pow lshift rshift and xor or'.split()
# Unary operators for which __<name>__ methods are generated.
_unary_ops = [
    dict(code="~", name='invert', op=operator.invert),
    dict(code="-", name='neg', op=operator.neg),
    dict(code="+", name='pos', op=operator.pos),
]
class Meta(type):
    """Metaclass that installs the arithmetic/comparison dunder methods
    (from ``_binary_ops``/``_unary_ops``) on the class being created.

    Each generated method does not compute a value; it builds a new
    :class:`Expression` whose string splices the operator between the
    operands' expression strings (i.e. lazy, symbolic evaluation).
    """
    def __new__(upperattr_metaclass, future_class_name,
                future_class_parents, attrs):
        # attrs = {}
        for op in _binary_ops:
            # wrap() binds `op` via a default argument so each generated
            # method captures its own operator (avoids late-binding closures).
            def wrap(op=op):
                def f(a, b):
                    self = a
                    # print(op, a, b)
                    # A string compared against a datetime column is parsed
                    # as a datetime literal.
                    if isinstance(b, str) and self.dtype.is_datetime:
                        b = np.datetime64(b)
                    # Categorical columns: translate a label into its index
                    # and operate on the index values instead.
                    if self.df.is_category(self.expression) and self.df._future_behaviour and not isinstance(b, Expression):
                        labels = self.df.category_labels(self.expression)
                        if b not in labels:
                            raise ValueError(f'Value {b} not present in {labels}')
                        b = labels.index(b)
                        a = self.index_values()
                    try:
                        stringy = isinstance(b, str) or b.is_string()
                    except:
                        # this can happen when expression is a literal, like '1' (used in propagate_unc)
                        # which causes the dtype to fail
                        stringy = False
                    if stringy:
                        # String operands map onto the str_* expression
                        # functions; only ==, != and + are supported.
                        if isinstance(b, str):
                            b = repr(b)
                        if op['code'] == '==':
                            expression = 'str_equals({0}, {1})'.format(a.expression, b)
                        elif op['code'] == '!=':
                            expression = 'str_notequals({0}, {1})'.format(a.expression, b)
                        elif op['code'] == '+':
                            expression = 'str_cat({0}, {1})'.format(a.expression, b)
                        else:
                            raise ValueError('operand %r not supported for string comparison' % op['code'])
                        return Expression(self.ds, expression=expression)
                    else:
                        if isinstance(b, Expression):
                            assert b.ds == a.ds
                            b = b.expression
                        elif isinstance(b, (np.timedelta64)):
                            # Scalar time deltas/datetimes are stored as
                            # DataFrame variables and referenced by name.
                            df = a.ds
                            b = df.add_variable('var_time_delta', b, unique=True)
                        elif isinstance(b, (np.datetime64)):
                            df = a.ds
                            b = df.add_variable('var_date_time', b, unique=True)
                        expression = '({0} {1} {2})'.format(a.expression, op['code'], b)
                        return Expression(self.ds, expression=expression)
                attrs['__%s__' % op['name']] = f
                if op['name'] in reversable:
                    # Reflected variant (__radd__ etc.): operand order in the
                    # generated expression string is swapped.
                    def f(a, b):
                        self = a
                        if isinstance(b, str):
                            if op['code'] == '+':
                                expression = 'str_cat({1}, {0})'.format(a.expression, repr(b))
                            else:
                                raise ValueError('operand %r not supported for string comparison' % op['code'])
                            return Expression(self.ds, expression=expression)
                        else:
                            if isinstance(b, Expression):
                                assert b.ds == a.ds
                                b = b.expression
                            expression = '({2} {1} {0})'.format(a.expression, op['code'], b)
                            return Expression(self.ds, expression=expression)
                    attrs['__r%s__' % op['name']] = f
            wrap(op)
        for op in _unary_ops:
            def wrap(op=op):
                def f(a):
                    self = a
                    expression = '{0}({1})'.format(op['code'], a.expression)
                    return Expression(self.ds, expression=expression)
                attrs['__%s__' % op['name']] = f
            wrap(op)
        return type(future_class_name, future_class_parents, attrs)
class DateTime(object):
    """Accessor namespace for datetime operations on an expression.

    Usually accessed using e.g. `df.birthday.dt.dayofweek`
    """
    def __init__(self, expression):
        # The wrapped Expression the dt.* operations act on.
        self.expression = expression
class TimeDelta(object):
    """Accessor namespace for timedelta operations on an expression.

    Usually accessed using e.g. `df.delay.td.days`
    """
    def __init__(self, expression):
        # The wrapped Expression the td.* operations act on.
        self.expression = expression
class StringOperations(object):
    """Accessor namespace for string operations on an expression.

    Usually accessed using e.g. `df.name.str.lower()`
    """
    def __init__(self, expression):
        # The wrapped Expression the str.* operations act on.
        self.expression = expression
class StringOperationsPandas(object):
    """String operations delegated to a Pandas Series (much slower than
    the native `.str` accessor)."""
    def __init__(self, expression):
        # The wrapped Expression the str_pandas.* operations act on.
        self.expression = expression
class Expression(with_metaclass(Meta)):
"""Expression class"""
    def __init__(self, ds, expression, ast=None):
        """Create an expression bound to DataFrame *ds*.

        :param ds: the DataFrame this expression is evaluated against
        :param expression: expression string, or another Expression whose
            string is reused; may be None when *ast* is given
        :param ast: pre-parsed AST; at least one of expression/ast required
        :raises ValueError: if both *expression* and *ast* are None
        """
        self.ds = ds
        assert not isinstance(ds, Expression)
        if isinstance(expression, Expression):
            expression = expression.expression
        if expression is None and ast is None:
            raise ValueError('Not both expression and the ast can be None')
        self._ast = ast
        self._expression = expression
        # The DataFrame tracks its expressions weakly (no lifetime coupling).
        self.df._expressions.append(weakref.ref(self))
        # Cache for ast_names, filled lazily.
        self._ast_names = None
    @property
    def _label(self):
        '''If a column name is an invalid identifier, the expression is df['long name'].
        This will return 'long name' in that case, otherwise simply the expression.
        '''
        ast = self.ast
        if isinstance(ast, expresso._ast.Subscript):
            value = ast.slice.value
            if isinstance(value, expresso.ast_Str):
                return value.s
            if isinstance(value, str):  # py39+
                return value
        return self.expression
def fingerprint(self):
fp = vaex.cache.fingerprint(self.expression, self.df.fingerprint())
return f'expression-{fp}'
def copy(self, df=None):
"""Efficiently copies an expression.
Expression objects have both a string and AST representation. Creating
the AST representation involves parsing the expression, which is expensive.
Using copy will deepcopy the AST when the expression was already parsed.
:param df: DataFrame for which the expression will be evaluated (self.df if None)
"""
# expression either has _expression or _ast not None
if df is None:
df = self.df
if self._expression is not None:
expression = Expression(df, self._expression)
if self._ast is not None:
expression._ast = copy.deepcopy(self._ast)
elif self._ast is not None:
expression = Expression(df, copy.deepcopy(self._ast))
if self._ast is not None:
expression._ast = self._ast
return expression
    @property
    def ast(self):
        """Returns the abstract syntax tree (AST) of the expression.

        Parsed lazily from the expression string and cached in ``_ast``.
        """
        if self._ast is None:
            self._ast = expresso.parse_expression(self.expression)
        return self._ast
    @property
    def ast_names(self):
        """Names (identifiers) referenced by the expression's AST, cached lazily."""
        if self._ast_names is None:
            self._ast_names = expresso.names(self.ast)
        return self._ast_names
    @property
    def _ast_slices(self):
        # Subscript accesses (e.g. df['name']) found in the AST; recomputed
        # on every access (not cached, unlike ast_names).
        return expresso.slices(self.ast)
    @property
    def expression(self):
        """The expression as a string; rendered from the AST on first access
        when only the AST form is available."""
        if self._expression is None:
            self._expression = expresso.node_to_string(self.ast)
        return self._expression
    @expression.setter
    def expression(self, value):
        # if we reassign to expression, we clear the ast cache
        # (the cached AST would no longer match the new string)
        if value != self._expression:
            self._expression = value
            self._ast = None
    def __bool__(self):
        """Cast expression to boolean. Only supports (<expr1> == <expr2> and <expr1> != <expr2>)

        The main use case for this is to support assigning to traitlets. e.g.:

        >>> bool(expr1 == expr2)

        This will return True when expr1 and expr2 are exactly the same (in string representation). And similarly for:

        >>> bool(expr != expr2)

        All other cases will return True.
        """
        # this is to make traitlets detect changes
        import _ast
        # `a == b` is truthy iff both sides render to the same string.
        if isinstance(self.ast, _ast.Compare) and len(self.ast.ops) == 1 and isinstance(self.ast.ops[0], _ast.Eq):
            return expresso.node_to_string(self.ast.left) == expresso.node_to_string(self.ast.comparators[0])
        # `a != b` is truthy iff the two sides render differently.
        if isinstance(self.ast, _ast.Compare) and len(self.ast.ops) == 1 and isinstance(self.ast.ops[0], _ast.NotEq):
            return expresso.node_to_string(self.ast.left) != expresso.node_to_string(self.ast.comparators[0])
        return True
    @property
    def df(self):
        """The DataFrame this expression belongs to (alias of ``self.ds``)."""
        # lets gradually move to using .df
        return self.ds
    @property
    def dtype(self):
        """Data type of the expression, as resolved by the DataFrame."""
        return self.df.data_type(self.expression)
# TODO: remove this method?
def data_type(self, array_type=None, axis=0):
return self.df.data_type(self.expression, axis=axis)
    @property
    def shape(self):
        """Shape of the expression's data, as reported by the DataFrame."""
        return self.df._shape_of(self)
    @property
    def ndim(self):
        """Number of dimensions; list-typed data counts as 1-dimensional."""
        return 1 if self.dtype.is_list else len(self.df._shape_of(self))
    def to_arrow(self, convert_to_native=False):
        '''Convert to Apache Arrow array (will byteswap/copy if convert_to_native=True).'''
        # Fully materializes the expression before conversion.
        values = self.values
        return vaex.array_types.to_arrow(values, convert_to_native=convert_to_native)
    def __arrow_array__(self, type=None):
        # Arrow protocol hook: lets pyarrow.array(expr) work directly.
        values = self.to_arrow()
        return pa.array(values, type=type)
    def to_numpy(self, strict=True):
        """Return a numpy representation of the data.

        :param strict: forwarded to the array conversion; see
            ``vaex.array_types.to_numpy``
        """
        values = self.values
        return vaex.array_types.to_numpy(values, strict=strict)
    def to_dask_array(self, chunks="auto"):
        """Lazily expose the expression as a dask array.

        :param chunks: dask chunk specification (default "auto")
        """
        import dask.array as da
        import uuid
        dtype = self.dtype
        chunks = da.core.normalize_chunks(chunks, shape=self.shape, dtype=dtype.numpy)
        # Unique task-graph name per call, so repeated conversions don't clash.
        name = 'vaex-expression-%s' % str(uuid.uuid1())
        def getitem(df, item):
            # dask hands us a tuple of slices; only 1-d, step-1 slices are
            # supported, which map directly onto evaluate(start, stop).
            assert len(item) == 1
            item = item[0]
            start, stop, step = item.start, item.stop, item.step
            assert step in [None, 1]
            return self.evaluate(start, stop, parallel=False)
        dsk = da.core.getem(name, chunks, getitem=getitem, shape=self.shape, dtype=dtype.numpy)
        dsk[name] = self
        return da.Array(dsk, name, chunks, dtype=dtype.numpy)
    def to_pandas_series(self):
        """Return a pandas.Series representation of the expression.

        Note: Pandas is likely to make a memory copy of the data.
        """
        import pandas as pd
        return pd.Series(self.values)
    def __getitem__(self, slice):
        # Apply the slice/filter to the DataFrame first, then select this
        # expression's column from the result.
        return self.ds[slice][self.expression]
    def __abs__(self):
        """Returns the absolute value of the expression (supports ``abs(expr)``)."""
        return self.abs()
    @property
    def dt(self):
        """Gives access to datetime operations via :py:class:`DateTime`"""
        return DateTime(self)
    @property
    def td(self):
        """Gives access to timedelta operations via :py:class:`TimeDelta`"""
        return TimeDelta(self)
    @property
    def str(self):
        """Gives access to string operations via :py:class:`StringOperations`"""
        return StringOperations(self)
    @property
    def str_pandas(self):
        """Gives access to string operations via :py:class:`StringOperationsPandas` (using Pandas Series)"""
        return StringOperationsPandas(self)
    @property
    def values(self):
        """Fully evaluated data of the expression (shortcut for ``evaluate()``)."""
        return self.evaluate()
    def derivative(self, var, simplify=True):
        """Return a new expression: the symbolic derivative with respect to *var*.

        :param var: variable (name or Expression) to differentiate against
        :param simplify: simplify the resulting expression tree
        """
        var = _ensure_string_from_expression(var)
        return self.__class__(self.ds, expresso.derivative(self.ast, var, simplify=simplify))
def expand(self, stop=[]):
    """Expand the expression such that no virtual columns occurs, only normal columns.

    Example:

    >>> df = vaex.example()
    >>> r = np.sqrt(df.data.x**2 + df.data.y**2)
    >>> r.expand().expression
    'sqrt(((x ** 2) + (y ** 2)))'

    :param stop: list of (virtual) column names at which to stop expanding
    """
    stop = _ensure_strings_from_expressions(stop)
    def translate(id):
        # substitute virtual column definitions, unless explicitly stopped
        if id in self.ds.virtual_columns and id not in stop:
            return self.ds.virtual_columns[id]
    expr = expresso.translate(self.ast, translate)
    return Expression(self.ds, expr)
def variables(self, ourself=False, expand_virtual=True, include_virtual=True):
    """Return a set of variables this expression depends on.

    Example:

    >>> df = vaex.example()
    >>> r = np.sqrt(df.data.x**2 + df.data.y**2)
    >>> r.variables()
    {'x', 'y'}

    :param ourself: include this expression's own name in the result
    :param expand_virtual: recurse into virtual column definitions
    :param include_virtual: include virtual column names themselves
    """
    variables = set()
    def record(varname):
        # do this recursively for virtual columns
        if varname in self.ds.virtual_columns and varname not in variables:
            if (include_virtual and (varname != self.expression)) or (varname == self.expression and ourself):
                variables.add(varname)
            if expand_virtual:
                variables.update(self.df[self.df.virtual_columns[varname]].variables(ourself=include_virtual, include_virtual=include_virtual))
        # we usually don't want to record ourself
        elif varname != self.expression or ourself:
            variables.add(varname)
    expresso.translate(self.ast, record)
    # df is a buildin, don't record it, if df is a column name, it will be collected as
    # df['df']
    variables -= {'df'}
    # also collect names referenced through the df['name'] subscript syntax
    for varname in self._ast_slices:
        if varname in self.df.virtual_columns and varname not in variables:
            if (include_virtual and (f"df['{varname}']" != self.expression)) or (f"df['{varname}']" == self.expression and ourself):
                variables.add(varname)
            if expand_virtual:
                if varname in self.df.virtual_columns:
                    variables |= self.df[self.df.virtual_columns[varname]].variables(ourself=include_virtual, include_virtual=include_virtual)
        elif f"df['{varname}']" != self.expression or ourself:
            variables.add(varname)
    return variables
def _graph(self):
    """"Return a graph containing the dependencies of this expression

    Structure is:
        [<string expression>, <function name if callable>, <function object if callable>, [subgraph/dependencies, ....]]
    """
    expression = self.expression
    def walk(node):
        if isinstance(node, six.string_types):
            # a plain name: either a virtual column (recurse) or a leaf
            if node in self.ds.virtual_columns:
                ex = Expression(self.ds, self.ds.virtual_columns[node])
                return [node, None, None, [ex._graph()]]
            else:
                return node
        else:
            fname, node_repr, deps = node
            if len(node_repr) > 30:  # clip too long expressions
                node_repr = node_repr[:26] + ' ....'
            deps = [walk(dep) for dep in deps]
            obj = self.ds.functions.get(fname)
            # we don't want the wrapper, we want the underlying object
            if isinstance(obj, Function):
                obj = obj.f
            if isinstance(obj, FunctionSerializablePickle):
                obj = obj.f
            return [node_repr, fname, obj, deps]
    return walk(expresso._graph(expression))
def _graphviz(self, dot=None):
    """Return a graphviz.Digraph object with a graph of the expression.

    :param dot: existing Digraph to add nodes/edges to; a new one is created when None.
    """
    from graphviz import Digraph  # `Graph` was imported but never used
    node = self._graph()
    dot = dot or Digraph(comment=self.expression)
    def walk(node):
        # leaves are plain column/variable names; inner nodes are
        # (repr, fname, fobj, deps) tuples produced by _graph()
        if isinstance(node, six.string_types):
            dot.node(node, node)
            return node, node
        else:
            node_repr, fname, fobj, deps = node
            node_id = node_repr
            dot.node(node_id, node_repr)
            for dep in deps:
                dep_id, dep = walk(dep)
                dot.edge(node_id, dep_id)
            return node_id, node
    walk(node)
    return dot
def __str__(self):
    """The string form of an expression is the expression text itself."""
    return self.expression
# def __array__(self, dtype=None):
# '''For casting to a numpy array
# Example:
# >>> np.array(ds.x**2)
# '''
# return self.ds.evaluate(self)
def tolist(self, i1=None, i2=None):
    '''Short for expr.evaluate().tolist()'''
    values = self.evaluate(i1=i1, i2=i2)
    # arrow arrays use a different method name than numpy arrays
    if isinstance(values, (pa.Array, pa.ChunkedArray)):
        return values.to_pylist()
    return values.tolist()
# NOTE(review): __repr__ triggers evaluation of (a preview of) the data;
# presumably it is disabled under VAEX_DEBUG to keep repr side-effect free
# while debugging — confirm.
if not os.environ.get('VAEX_DEBUG', ''):
    def __repr__(self):
        return self._repr_plain_()
def _repr_plain_(self):
    """Plain-text representation: the (clipped) expression, length/dtype info
    and a head/tail preview of the evaluated values."""
    from .formatting import _format_value
    def format(values):
        for i in range(len(values)):
            value = values[i]
            yield _format_value(value)
    colalign = ("right",) * 2
    try:
        N = len(self.ds)
        if N <= PRINT_MAX_COUNT:
            values = format(self.evaluate(0, N))
            values = tabulate.tabulate([[i, k] for i, k in enumerate(values)], tablefmt='plain', colalign=colalign)
        else:
            # too many rows: show the first and last PRINT_MAX_COUNT//2 with a '...' separator
            values_head = format(self.evaluate(0, PRINT_MAX_COUNT//2))
            values_tail = format(self.evaluate(N - PRINT_MAX_COUNT//2, N))
            values_head = list(zip(range(PRINT_MAX_COUNT//2), values_head)) +\
                list(zip(range(N - PRINT_MAX_COUNT//2, N), values_tail))
            values = tabulate.tabulate([k for k in values_head], tablefmt='plain', colalign=colalign)
            values = values.split('\n')
            width = max(map(len, values))
            separator = '\n' + '...'.center(width, ' ') + '\n'
            values = "\n".join(values[:PRINT_MAX_COUNT//2]) + separator + "\n".join(values[PRINT_MAX_COUNT//2:]) + '\n'
    except Exception as e:
        # an expression can fail to evaluate; still produce a usable repr
        values = 'Error evaluating: %r' % e
    expression = self.expression
    if len(expression) > 60:
        expression = expression[:57] + '...'
    info = 'Expression = ' + expression + '\n'
    dtype = self.dtype
    if self.expression in self.ds.get_column_names(hidden=True):
        state = "column"
    elif self.expression in self.ds.virtual_columns:
        # BUGFIX: this branch previously repeated the get_column_names check,
        # making "virtual column" unreachable
        state = "virtual column"
    else:
        state = "expression"
    line = 'Length: {:,} dtype: {} ({})\n'.format(len(self.ds), dtype, state)
    info += line
    info += '-' * (len(line)-1) + '\n'
    info += values
    return info
def count(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, edges=False, progress=None):
    '''Shortcut for ds.count(expression, ...), see `Dataset.count`'''
    # forward every argument explicitly instead of harvesting locals()
    return self.ds.count(expression=self.expression, binby=binby, limits=limits,
                         shape=shape, selection=selection, delay=delay,
                         edges=edges, progress=progress)
def sum(self, axis=None, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    '''Sum elements over given axis.

    If no axis is given, it will sum over all axes.

    For non list elements, this is a shortcut for ds.sum(expression, ...), see `Dataset.sum`.

    >>> list_data = [1, 2, None], None, [], [1, 3, 4, 5]
    >>> df = vaex.from_arrays(some_list=pa.array(list_data))
    >>> df.some_list.sum().item() # will sum over all axis
    16
    >>> df.some_list.sum(axis=1).tolist() # sums the list elements
    [3, None, 0, 13]

    :param int axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
    '''
    expression = self
    # normalize axis to a list of ints covering the requested dimensions
    if axis is None:
        dtype = self.dtype
        if dtype.is_list:
            # one axis level per nesting level of the list dtype
            axis = [0]
            while dtype.is_list:
                axis.append(axis[-1] + 1)
                dtype = dtype.value_type
        elif self.ndim > 1:
            axis = list(range(self.ndim))
        else:
            axis = [0]
    elif not isinstance(axis, list):
        axis = [axis]
    axis = list(set(axis))  # remove repeated elements
    dtype = self.dtype
    if self.ndim > 1:
        # sum over the non-row axes first via array_sum, leaving axis 0 (rows)
        array_axes = axis.copy()
        if 0 in array_axes:
            array_axes.remove(0)
        expression = expression.array_sum(axis=array_axes)
        for i in array_axes:
            axis.remove(i)
        # locals are harvested below via locals(); clean up loop temporaries.
        # NOTE(review): `del i` raises NameError when array_axes is empty — confirm
        del i
        del array_axes
    elif 1 in axis:
        if self.dtype.is_list:
            expression = expression.list_sum()
            if axis:
                axis.remove(1)
        else:
            raise ValueError(f'axis=1 not supported for dtype={dtype}')
    if axis and axis[0] != 0:
        raise ValueError(f'Only axis 0 or 1 is supported')
    if expression.ndim > 1:
        raise ValueError(f'Cannot sum non-scalar (ndim={expression.ndim})')
    if axis is None or 0 in axis:
        # reduce over rows: delegate to Dataset.sum with the remaining kwargs
        kwargs = dict(locals())
        del kwargs['self']
        del kwargs['axis']
        del kwargs['dtype']
        kwargs['expression'] = expression.expression
        return self.ds.sum(**kwargs)
    else:
        # only element-wise axes were requested: return the lazy expression
        return expression
def mean(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    '''Shortcut for ds.mean(expression, ...), see `Dataset.mean`'''
    # forward every argument explicitly instead of harvesting locals()
    return self.ds.mean(expression=self.expression, binby=binby, limits=limits,
                        shape=shape, selection=selection, delay=delay, progress=progress)
def std(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    '''Shortcut for ds.std(expression, ...), see `Dataset.std`'''
    # forward every argument explicitly instead of harvesting locals()
    return self.ds.std(expression=self.expression, binby=binby, limits=limits,
                       shape=shape, selection=selection, delay=delay, progress=progress)
def var(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    '''Shortcut for ds.var(expression, ...), see `Dataset.var`'''
    kwargs = dict(locals())
    del kwargs['self']
    kwargs['expression'] = self.expression
    return self.ds.var(**kwargs)
def minmax(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    '''Shortcut for ds.minmax(expression, ...), see `Dataset.minmax`'''
    # forward every argument explicitly instead of harvesting locals()
    return self.ds.minmax(expression=self.expression, binby=binby, limits=limits,
                          shape=shape, selection=selection, delay=delay, progress=progress)
def min(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    '''Shortcut for ds.min(expression, ...), see `Dataset.min`'''
    # forward every argument explicitly instead of harvesting locals()
    return self.ds.min(expression=self.expression, binby=binby, limits=limits,
                       shape=shape, selection=selection, delay=delay, progress=progress)
def max(self, binby=[], limits=None, shape=default_shape, selection=False, delay=False, progress=None):
    '''Shortcut for ds.max(expression, ...), see `Dataset.max`'''
    # forward every argument explicitly instead of harvesting locals()
    return self.ds.max(expression=self.expression, binby=binby, limits=limits,
                       shape=shape, selection=selection, delay=delay, progress=progress)
def nop(self):
    """Evaluates expression, and drop the result, useful for benchmarking, since vaex is usually lazy"""
    return self.ds.nop(self.expression)
@property
def transient(self):
    """If this expression is not transient (e.g. on disk) optimizations can be made"""
    # transient means: not backed directly by a stored column
    return self.expand().expression not in self.ds.columns
@property
def masked(self):
    """Alias to df.is_masked(expression)"""
    return self.ds.is_masked(self.expression)
def value_counts(self, dropna=False, dropnan=False, dropmissing=False, ascending=False, progress=False, axis=None):
    """Computes counts of unique values.

    WARNING:
      * If the expression/column is not categorical, it will be converted on the fly
      * dropna is False by default, it is True by default in pandas

    :param dropna: when True, it will not report the NA (see :func:`Expression.isna`)
    :param dropnan: when True, it will not report the nans(see :func:`Expression.isnan`)
    :param dropmissing: when True, it will not report the missing values (see :func:`Expression.ismissing`)
    :param ascending: when False (default) it will report the most frequent occuring item first
    :param bool axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
    :returns: Pandas series containing the counts
    """
    from pandas import Series
    if axis is not None:
        raise ValueError('only axis=None is supported')
    data_type = self.data_type()
    data_type_item = self.data_type(axis=-1)
    transient = self.transient or self.ds.filtered or self.ds.is_masked(self.expression)
    if self.is_string() and not transient:
        # string is a special case, only ColumnString are not transient
        ar = self.ds.columns[self.expression]
        if not isinstance(ar, ColumnString):
            transient = True
    counter_type = counter_type_from_dtype(data_type_item, transient)
    # one counter per worker thread, merged after the map/reduce pass
    counters = [None] * self.ds.executor.thread_pool.nthreads
    def map(thread_index, i1, i2, ar):
        if counters[thread_index] is None:
            counters[thread_index] = counter_type()
        if data_type.is_list and axis is None:
            ar = ar.values  # flatten the list array
        if data_type_item.is_string:
            ar = _to_string_sequence(ar)
        else:
            ar = vaex.array_types.to_numpy(ar)
        if np.ma.isMaskedArray(ar):
            mask = np.ma.getmaskarray(ar)
            counters[thread_index].update(ar, mask)
        else:
            counters[thread_index].update(ar)
        return 0
    def reduce(a, b):
        return a+b
    self.ds.map_reduce(map, reduce, [self.expression], delay=False, progress=progress, name='value_counts', info=True, to_numpy=False)
    counters = [k for k in counters if k is not None]
    counter0 = counters[0]
    for other in counters[1:]:
        counter0.merge(other)
    value_counts = counter0.extract()
    index = np.array(list(value_counts.keys()))
    counts = np.array(list(value_counts.values()))
    order = np.argsort(counts)
    if not ascending:
        order = order[::-1]
    counts = counts[order]
    index = index[order]
    # nan can already be present for dtype=object, remove it
    # (it is re-added below from counter0.nan_count when not dropped)
    nan_mask = index != index
    if np.any(nan_mask):
        # BUGFIX: previously filtered with the undefined name `mask` and
        # assigned index values to `counts`
        index = index[~nan_mask]
        counts = counts[~nan_mask]
    # None can already be present for dtype=object, optionally remove it
    none_mask = index == None  # noqa: E711 — elementwise comparison intended
    if np.any(none_mask):
        index = index.tolist()
        counts = counts.tolist()
        i = index.index(None)
        if (dropmissing or dropna):
            del index[i]
            del counts[i]
        else:
            index[i] = "missing"
        index = np.array(index)
        counts = np.array(counts)
    if not dropna or not dropnan or not dropmissing:
        index = index.tolist()
        counts = counts.tolist()
        if not (dropnan or dropna) and counter0.nan_count:
            index = [np.nan] + index
            counts = [counter0.nan_count] + counts
        if not (dropmissing or dropna) and counter0.null_count:
            index = ['missing'] + index
            counts = [counter0.null_count] + counts
    return Series(counts, index=index)
@docsubst
def unique(self, dropna=False, dropnan=False, dropmissing=False, selection=None, axis=None, array_type='list', delay=False):
    """Returns all unique values.

    :param dropmissing: do not count missing values
    :param dropnan: do not count nan values
    :param dropna: short for any of the above, (see :func:`Expression.isna`)
    :param bool axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
    :param bool array_type: {array_type}
    """
    return self.ds.unique(self.expression, dropna=dropna, dropnan=dropnan, dropmissing=dropmissing, selection=selection, array_type=array_type, axis=axis, delay=delay)
def nunique(self, dropna=False, dropnan=False, dropmissing=False, selection=None, axis=None, delay=False):
    """Counts number of unique values, i.e. `len(df.x.unique()) == df.x.nunique()`.

    :param dropmissing: do not count missing values
    :param dropnan: do not count nan values
    :param dropna: short for any of the above, (see :func:`Expression.isna`)
    :param bool axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
    """
    # NOTE(review): delay=True would make unique() return a promise, on which len() fails — confirm intended usage
    return len(self.unique(dropna=dropna, dropnan=dropnan, dropmissing=dropmissing, selection=selection, axis=axis, delay=delay))
def countna(self):
    """Returns the number of Not Available (N/A) values in the expression.
    This includes missing values and np.nan values.
    """
    return self.isna().sum().item()  # so the output is int, not array
def countnan(self):
    """Returns the number of NaN values in the expression."""
    return self.isnan().sum().item()  # so the output is int, not array
def countmissing(self):
    """Returns the number of missing values in the expression."""
    return self.ismissing().sum().item()  # so the output is int, not array
def evaluate(self, i1=None, i2=None, out=None, selection=None, parallel=True, array_type=None):
    """Evaluate this expression over rows [i1, i2), delegating to the DataFrame."""
    return self.ds.evaluate(self, i1, i2, out=out, selection=selection, array_type=array_type, parallel=parallel)
# TODO: it is not so elegant we need to have a custom version of this
# it now also misses the docstring, reconsider how the the meta class auto
# adds this method
def fillna(self, value, fill_nan=True, fill_masked=True):
    """Lazily replace NaN and/or masked values by ``value``; see the registered `fillna` function."""
    return self.ds.func.fillna(self, value=value, fill_nan=fill_nan, fill_masked=fill_masked)
def clip(self, lower=None, upper=None):
    """Lazily clip values to the interval [lower, upper]."""
    return self.ds.func.clip(self, lower, upper)
def jit_numba(self, verbose=False):
    """Compile this expression with numba and return an expression calling the jitted function."""
    # only compile locally; remote dataframes receive the serialized form
    f = FunctionSerializableNumba.build(self.expression, df=self.ds, verbose=verbose, compile=self.ds.is_local())
    function = self.ds.add_function('_jit', f, unique=True)
    return function(*f.arguments)
def jit_cuda(self, verbose=False):
    """Compile this expression with cupy (CUDA) and return an expression calling the fused function."""
    f = FunctionSerializableCuda.build(self.expression, df=self.ds, verbose=verbose, compile=self.ds.is_local())
    function = self.ds.add_function('_jit', f, unique=True)
    return function(*f.arguments)
def jit_pythran(self, verbose=False):
    """Compile this expression with pythran and return an expression calling the compiled function."""
    import logging
    logger = logging.getLogger('pythran')
    log_level = logger.getEffectiveLevel()
    try:
        if not verbose:
            logger.setLevel(logging.ERROR)
        import pythran
        import imp  # NOTE(review): `imp` is deprecated; importlib would be the modern replacement
        import hashlib
        # self._import_all(module)
        names = []
        funcs = set(expression_namespace.keys())
        expression = self.expression
        if expression in self.ds.virtual_columns:
            expression = self.ds.virtual_columns[self.expression]
        all_vars = self.ds.get_column_names(virtual=True, strings=True, hidden=True) + list(self.ds.variables.keys())
        vaex.expresso.validate_expression(expression, all_vars, funcs, names)
        names = list(set(names))
        types = ", ".join(str(self.ds.data_type(name)) + "[]" for name in names)
        argstring = ", ".join(names)
        code = '''
from numpy import *
#pythran export f({2})
def f({0}):
    return {1}'''.format(argstring, expression, types)
        if verbose:
            print("generated code")
            print(code)
        # hash the generated code so recompilation only happens for new expressions
        m = hashlib.md5()
        m.update(code.encode('utf-8'))
        module_name = "pythranized_" + m.hexdigest()
        # print(m.hexdigest())
        # BUGFIX: the conditional must be parenthesized; previously
        # `a + [] if verbose else ["-w"]` dropped the -DBOOST_SIMD/-march
        # flags entirely when verbose was False
        module_path = pythran.compile_pythrancode(module_name, code, extra_compile_args=["-DBOOST_SIMD", "-march=native"] + ([] if verbose else ["-w"]))
        module = imp.load_dynamic(module_name, module_path)
        function_name = "f_" + m.hexdigest()
        function = self.ds.add_function(function_name, module.f, unique=True)
        return Expression(self.ds, "{0}({1})".format(function.name, argstring))
    finally:
        # always restore the pythran logger level
        logger.setLevel(log_level)
def _rename(self, old, new, inplace=False):
    """Rename every reference to column `old` to `new` inside this expression's AST.

    :param inplace: mutate this expression instead of a copy
    :return: the (possibly copied) renamed expression
    """
    expression = self if inplace else self.copy()
    # plain name references: x -> new
    if old in expression.ast_names:
        for node in expression.ast_names[old]:
            node.id = new
        expression._ast_names[new] = expression._ast_names.pop(old)
    # subscript references: df['x'] -> df['new']
    slices = expression._ast_slices
    if old in slices:
        for node in slices[old]:
            if node.value.id == 'df' and isinstance(node.slice.value, ast.Str):
                node.slice.value.s = new
            else:  # py39
                node.slice.value = new
    expression._expression = None  # resets the cached string representation
    return expression
def astype(self, data_type):
    """Lazily cast the expression to ``data_type`` (string-like types map to 'str')."""
    if vaex.array_types.is_string_type(data_type) or data_type == str:
        target = 'str'
    else:
        target = str(data_type)
    return self.ds.func.astype(self, target)
def isin(self, values, use_hashmap=True):
    """Lazily tests if each value in the expression is present in values.

    :param values: List/array of values to check
    :param use_hashmap: use a hashmap or not (especially faster when values contains many elements)
    :return: :class:`Expression` with the lazy expression.
    """
    if use_hashmap:
        # easiest way to create a set is using the vaex dataframe
        values = np.array(values, dtype=self.dtype.numpy)  # ensure that values are the same dtype as the expression (otherwise the set downcasts at the C++ level during execution)
        df_values = vaex.from_arrays(x=values)
        ordered_set = df_values._set(df_values.x)
        var = self.df.add_variable('var_isin_ordered_set', ordered_set, unique=True)
        return self.df['isin_set(%s, %s)' % (self, var)]
    else:
        # linear scan variant: store the raw values as a variable
        if self.is_string():
            values = pa.array(values)
        else:
            values = np.array(values, dtype=self.dtype.numpy)
        var = self.df.add_variable('isin_values', values, unique=True)
        return self.df['isin(%s, %s)' % (self, var)]
def apply(self, f, vectorize=False, multiprocessing=True):
    """Apply a function along all values of an Expression.

    Shorthand for ``df.apply(f, arguments=[expression])``, see :meth:`DataFrame.apply`

    Example:

    >>> df = vaex.example()
    >>> df.x
    Expression = x
    Length: 330,000 dtype: float64 (column)
    ---------------------------------------
         0  -0.777471
         1    3.77427
         2    1.37576
         3   -7.06738
         4   0.243441

    >>> def func(x):
    ...     return x**2

    >>> df.x.apply(func)
    Expression = lambda_function(x)
    Length: 330,000 dtype: float64 (expression)
    -------------------------------------------
         0   0.604461
         1    14.2451
         2    1.89272
         3    49.9478
         4  0.0592637

    :param f: A function to be applied on the Expression values
    :param vectorize: Call f with arrays instead of a scalars (for better performance).
    :param bool multiprocessing: Use multiple processes to avoid the GIL (Global interpreter lock).
    :returns: A function that is lazily evaluated when called.
    """
    return self.ds.apply(f, [self.expression], vectorize=vectorize, multiprocessing=multiprocessing)
def dropmissing(self):
    """Drop rows where this expression has a missing (masked/null) value and return the expression on the filtered frame."""
    # TODO: df.dropna does not support inplace
    # df = self.df if inplace else self.df.copy()
    df = self.ds
    df = df.dropmissing(column_names=[self.expression])
    return df._expr(self.expression)
def dropnan(self):
    """Drop rows where this expression is NaN and return the expression on the filtered frame."""
    # TODO: df.dropna does not support inplace
    # df = self.df if inplace else self.df.copy()
    df = self.ds
    df = df.dropnan(column_names=[self.expression])
    return df._expr(self.expression)
def dropna(self):
    """Drop rows where this expression is NA (NaN or missing) and return the expression on the filtered frame."""
    # TODO: df.dropna does not support inplace
    # df = self.df if inplace else self.df.copy()
    df = self.ds
    df = df.dropna(column_names=[self.expression])
    return df._expr(self.expression)
def map(self, mapper, nan_value=None, missing_value=None, default_value=None, allow_missing=False, axis=None):
    """Map values of an expression or in memory column according to an input
    dictionary or a custom callable function.

    Example:

    >>> import vaex
    >>> df = vaex.from_arrays(color=['red', 'red', 'blue', 'red', 'green'])
    >>> mapper = {'red': 1, 'blue': 2, 'green': 3}
    >>> df['color_mapped'] = df.color.map(mapper)
    >>> df
    #  color      color_mapped
    0  red                   1
    1  red                   1
    2  blue                  2
    3  red                   1
    4  green                 3
    >>> import numpy as np
    >>> df = vaex.from_arrays(type=[0, 1, 2, 2, 2, np.nan])
    >>> df['role'] = df['type'].map({0: 'admin', 1: 'maintainer', 2: 'user', np.nan: 'unknown'})
    >>> df
    #    type  role
    0       0  admin
    1       1  maintainer
    2       2  user
    3       2  user
    4       2  user
    5     nan  unknown
    >>> import vaex
    >>> import numpy as np
    >>> df = vaex.from_arrays(type=[0, 1, 2, 2, 2, 4])
    >>> df['role'] = df['type'].map({0: 'admin', 1: 'maintainer', 2: 'user'}, default_value='unknown')
    >>> df
    #    type  role
    0       0  admin
    1       1  maintainer
    2       2  user
    3       2  user
    4       2  user
    5       4  unknown
    :param mapper: dict like object used to map the values from keys to values
    :param nan_value: value to be used when a nan is present (and not in the mapper)
    :param missing_value: value to use used when there is a missing value
    :param default_value: value to be used when a value is not in the mapper (like dict.get(key, default))
    :param allow_missing: used to signal that values in the mapper should map to a masked array with missing values,
        assumed True when default_value is not None.
    :param bool axis: Axis over which to determine the unique elements (None will flatten arrays or lists)
    :return: A vaex expression
    :rtype: vaex.expression.Expression
    """
    assert isinstance(mapper, collectionsAbc.Mapping), "mapper should be a dict like object"
    if axis is not None:
        raise ValueError('only axis=None is supported')
    df = self.ds
    mapper_keys = list(mapper.keys())
    # detect a nan key; np.isnan fails for mixed-type keys, so fall back per element
    try:
        mapper_nan_key_mask = np.isnan(mapper_keys)
    except TypeError:
        # case where we have mixed strings/nan etc
        def try_nan(x):
            try:
                return np.isnan(x)
            except:
                return False
        mapper_nan_key_mask = np.array([try_nan(k) for k in mapper_keys])
    mapper_has_nan = mapper_nan_key_mask.sum() > 0
    if mapper_nan_key_mask.sum() > 1:
        raise ValueError('Insanity, you provided multiple nan values as keys for your dict')
    if mapper_has_nan:
        for key, value in mapper.items():
            if key != key:  # nan != nan
                nan_value = value
    for key, value in mapper.items():
        if key is None:
            missing_value = value
    # BUGFIX: removed a duplicated `axis is not None` check (already raised above)
    # and a leftover debug `print(nan_value)` further below
    # we map the keys to a ordinal values [0, N-1] using the set
    key_set = df._set(self.expression, flatten=axis is None)
    found_keys = key_set.keys()

    # we want all possible values to be converted
    # so mapper's key should be a superset of the keys found
    use_masked_array = False
    if default_value is not None:
        allow_missing = True
    if allow_missing:
        use_masked_array = True
    if not set(mapper_keys).issuperset(found_keys):
        missing = set(found_keys).difference(mapper_keys)
        missing0 = list(missing)[0]
        only_has_nan = missing0 != missing0 and len(missing) == 1
        if allow_missing:
            if default_value is not None:
                value0 = list(mapper.values())[0]
                assert np.issubdtype(type(default_value), np.array(value0).dtype), "default value has to be of similar type"
        else:
            if only_has_nan:
                pass  # we're good, the hash mapper deals with nan
            else:
                raise ValueError('Missing %i values in mapper: %s' % (len(missing), missing))

    # and these are the corresponding choices
    # note that here we map 'planned' unknown values to the default values
    # and later on in _choose, we map values not even seen in the dataframe
    # to the default_value
    dtype = self.data_type(self.expression)
    dtype_item = self.data_type(self.expression, axis=-1)
    if dtype_item.is_float:
        values = [np.nan, None] + [key for key in mapper if key == key and key is not None]
        choices = [default_value, nan_value, missing_value] + [mapper[key] for key in mapper if key == key and key is not None]
    else:
        values = [None] + [key for key in mapper if key is not None]
        choices = [default_value, missing_value] + [mapper[key] for key in mapper if key is not None]
    values = pa.array(values)
    choices = pa.array(choices)

    from .hash import ordered_set_type_from_dtype
    ordered_set_type = ordered_set_type_from_dtype(dtype_item)
    ordered_set = ordered_set_type()
    if vaex.array_types.is_string_type(dtype_item):
        values = _to_string_sequence(values)
    else:
        values = vaex.array_types.to_numpy(values)
    if np.ma.isMaskedArray(values):
        mask = np.ma.getmaskarray(values)
        ordered_set.update(values.data, mask)
    else:
        ordered_set.update(values)

    key_set_name = df.add_variable('map_key_set', ordered_set, unique=True)
    choices_name = df.add_variable('map_choices', choices, unique=True)
    if allow_missing:
        expr = '_map({}, {}, {}, use_missing={!r}, axis={!r})'.format(self, key_set_name, choices_name, use_masked_array, axis)
    else:
        expr = '_map({}, {}, {}, axis={!r})'.format(self, key_set_name, choices_name, axis)
    return Expression(df, expr)
@property
def is_masked(self):
    """True when the evaluated data is a (numpy) masked array."""
    return self.ds.is_masked(self.expression)
def is_string(self):
    """True when the expression has a string data type."""
    return self.df.is_string(self.expression)
class FunctionSerializable(object):
    """Marker base class for user functions that know how to (de)serialize themselves."""
    pass
@vaex.serialize.register
class FunctionSerializablePickle(FunctionSerializable):
    """Wraps an arbitrary Python callable, serialized via pickle (base64-encoded)."""
    def __init__(self, f=None, multiprocessing=False):
        self.f = f  # the wrapped callable
        self.multiprocessing = multiprocessing  # run via worker processes to avoid the GIL

    def __eq__(self, rhs):
        return self.f == rhs.f

    def pickle(self, function):
        """Serialize the callable to bytes."""
        return pickle.dumps(function)

    def unpickle(self, data):
        """Deserialize a callable from bytes."""
        return pickle.loads(data)

    def __getstate__(self):
        return self.state_get()

    def __setstate__(self, state):
        self.state_set(state)

    def state_get(self):
        # base64 so the state is JSON/ASCII friendly
        data = self.pickle(self.f)
        if vaex.utils.PY2:
            pickled = base64.encodestring(data)
        else:
            pickled = base64.encodebytes(data).decode('ascii')
        return dict(pickled=pickled)

    @classmethod
    def state_from(cls, state, trusted=True):
        obj = cls()
        obj.state_set(state, trusted=trusted)
        return obj

    def state_set(self, state, trusted=True):
        data = state['pickled']
        if vaex.utils.PY2:
            data = base64.decodestring(data)
        else:
            data = base64.decodebytes(data.encode('ascii'))
        # unpickling executes arbitrary code, so refuse untrusted sources
        if trusted is False:
            raise ValueError("Will not unpickle data when source is not trusted")
        self.f = self.unpickle(data)

    def __call__(self, *args, **kwargs):
        '''Forward the call to the real function'''
        import vaex.multiprocessing
        return vaex.multiprocessing.apply(self._apply, args, kwargs, self.multiprocessing)

    def _apply(self, *args, **kwargs):
        return self.f(*args, **kwargs)
class FunctionSerializableJit(FunctionSerializable):
    """Base for JIT-compiled expression functions; serializes the expression
    text plus argument/return dtypes, and recompiles on deserialization.
    Subclasses implement ``compile()``."""
    def __init__(self, expression, arguments, argument_dtypes, return_dtype, verbose=False, compile=True):
        self.expression = expression
        self.arguments = arguments
        self.argument_dtypes = argument_dtypes
        self.return_dtype = return_dtype
        self.verbose = verbose
        if compile:
            self.f = self.compile()
        else:
            # remote/non-local dataframes keep an uncompiled placeholder
            def placeholder(*args, **kwargs):
                raise Exception('You chose not to compile this function (locally), but did invoke it')
            self.f = placeholder

    def state_get(self):
        return dict(expression=self.expression,
                    arguments=self.arguments,
                    argument_dtypes=list(map(lambda dtype: str(dtype.numpy), self.argument_dtypes)),
                    return_dtype=str(self.return_dtype),
                    verbose=self.verbose)

    @classmethod
    def state_from(cls, state, trusted=True):
        # reconstruct (and recompile) from the serialized state
        return cls(expression=state['expression'],
                   arguments=state['arguments'],
                   argument_dtypes=list(map(lambda s: DataType(np.dtype(s)), state['argument_dtypes'])),
                   return_dtype=DataType(np.dtype(state['return_dtype'])),
                   verbose=state['verbose'])

    @classmethod
    def build(cls, expression, df=None, verbose=False, compile=True):
        """Create an instance from an expression, inferring arguments and dtypes from the dataframe."""
        df = df or expression.df
        # if it's a virtual column, we probably want to optimize that
        # TODO: fully extract the virtual columns, i.e. depending ones?
        expression = str(expression)
        if expression in df.virtual_columns:
            expression = df.virtual_columns[expression]
        # function validation, and finding variable names
        all_vars = df.get_column_names(hidden=True) + list(df.variables.keys())
        funcs = set(list(expression_namespace.keys()) + list(df.functions.keys()))
        names = []
        vaex.expresso.validate_expression(expression, all_vars, funcs, names)
        # TODO: can we do the above using the Expressio API?s
        arguments = list(set(names))
        argument_dtypes = [df.data_type(argument, array_type='numpy') for argument in arguments]
        return_dtype = df[expression].dtype
        return cls(str(expression), arguments, argument_dtypes, return_dtype, verbose, compile=compile)

    def __call__(self, *args, **kwargs):
        '''Forward the call to the numba function'''
        return self.f(*args, **kwargs)
@vaex.serialize.register
class FunctionSerializableNumba(FunctionSerializableJit):
    """JIT function compiled with numba.vectorize over the expression."""
    def compile(self):
        import numba
        argstring = ", ".join(self.arguments)
        # generate a plain python function evaluating the expression
        code = '''
from numpy import *
def f({0}):
    return {1}'''.format(argstring, self.expression)
        if self.verbose:
            print('Generated code:\n' + code)
        scope = {}
        exec(code, scope)
        f = scope['f']
        # numba part
        def get_type(name):
            if name == "bool":
                name = "bool_"  # numba uses bool_ instead of numpy's bool
            return getattr(numba, name)
        argument_dtypes_numba = [get_type(argument_dtype.numpy.name) for argument_dtype in self.argument_dtypes]
        return_dtype_numba = get_type(self.return_dtype.numpy.name)
        vectorizer = numba.vectorize([return_dtype_numba(*argument_dtypes_numba)])
        return vectorizer(f)
@vaex.serialize.register
class FunctionSerializableCuda(FunctionSerializableJit):
    """JIT function compiled for CUDA using cupy's @fuse kernel fusion."""
    def compile(self):
        import cupy
        # code generation
        argstring = ", ".join(self.arguments)
        code = '''
from cupy import *
import cupy
@fuse()
def f({0}):
    return {1}
'''.format(argstring, self.expression)#, ";".join(conversions))
        if self.verbose:
            print("generated code")
            print(code)
        scope = dict()#cupy=cupy)
        exec(code, scope)
        func = scope['f']
        def wrapper(*args):
            # move inputs to the GPU (native byte order required), run, copy back
            args = [vaex.array_types.to_numpy(k) for k in args]
            args = [vaex.utils.to_native_array(arg) if isinstance(arg, np.ndarray) else arg for arg in args]
            args = [cupy.asarray(arg) if isinstance(arg, np.ndarray) else arg for arg in args]
            return cupy.asnumpy(func(*args))
        return wrapper
# TODO: this is not the right abstraction, since this won't allow a
# numba version for the function
@vaex.serialize.register
class FunctionToScalar(FunctionSerializablePickle):
    """Applies a scalar->scalar function element-wise over the input arrays."""
    def __call__(self, *args, **kwargs):
        import vaex.multiprocessing
        return vaex.multiprocessing.apply(self._apply, args, kwargs, self.multiprocessing)

    def _apply(self, *args, **kwargs):
        length = len(args[0])
        result = []
        def fix_type(v):
            # TODO: only when column is str type?
            if isinstance(v, np.str_):
                return str(v)
            if isinstance(v, np.bytes_):
                return v.decode('utf8')
            else:
                return v
        args = [vaex.array_types.tolist(k) for k in args]
        # call f once per row with python scalars
        for i in range(length):
            scalar_result = self.f(*[fix_type(k[i]) for k in args], **{key: value[i] for key, value in kwargs.items()})
            result.append(scalar_result)
        result = np.array(result)
        return result
class Function(object):
    """A named user function registered on a dataset; calling it builds a lazy Expression."""
    def __init__(self, dataset, name, f):
        self.dataset = dataset
        self.name = name

        if not vaex.serialize.can_serialize(f):  # if not serializable, assume we can use pickle
            f = FunctionSerializablePickle(f)
        self.f = f

    def __call__(self, *args, **kwargs):
        # BUGFIX: '{:r}' is an invalid format spec; repr conversion is '{!r}'
        arg_string = ", ".join([str(k) for k in args] + ['{}={!r}'.format(name, value) for name, value in kwargs.items()])
        expression = "{}({})".format(self.name, arg_string)
        return Expression(self.dataset, expression)
class FunctionBuiltin(object):
    """A builtin function reference; calling it builds a lazy Expression,
    merging preset keyword arguments with call-time ones."""
    def __init__(self, dataset, name, **kwargs):
        self.dataset = dataset
        self.name = name
        self.kwargs = kwargs  # default kwargs applied on every call

    def __call__(self, *args, **kwargs):
        kwargs = dict(kwargs, **self.kwargs)
        # BUGFIX: '{:r}' is an invalid format spec; repr conversion is '{!r}'
        arg_string = ", ".join([str(k) for k in args] + ['{}={!r}'.format(name, value) for name, value in kwargs.items()])
        expression = "{}({})".format(self.name, arg_string)
        return Expression(self.dataset, expression)
| 40.044118 | 184 | 0.587899 |
61d8e9a4a5c1c584941837fddb3a82e0ed11d430 | 810 | py | Python | pytorch_unet/utils/metrics.py | mukeshmithrakumar/UNet | 3f83f5116cd897293f1075f448703b75930707d5 | [
"MIT"
] | 11 | 2019-02-03T14:20:24.000Z | 2021-06-28T15:18:59.000Z | pytorch_unet/utils/metrics.py | mukeshmithrakumar/radnet | 3f83f5116cd897293f1075f448703b75930707d5 | [
"MIT"
] | null | null | null | pytorch_unet/utils/metrics.py | mukeshmithrakumar/radnet | 3f83f5116cd897293f1075f448703b75930707d5 | [
"MIT"
] | 2 | 2019-07-19T20:00:24.000Z | 2020-02-18T04:49:49.000Z | import numpy as np
def dice(image1, image2, empty_score=1.0):
    """Compute the Dice similarity coefficient between two binary masks.

    Note:
        The F1 score is also known as the Sørensen–Dice coefficient or Dice
        similarity coefficient (DSC).

    :param image1 : The prediction (array-like; cast to boolean).
    :param image2 : The label (same shape as ``image1``).
    :param empty_score : Score returned when both masks are empty, to
        prevent division by 0.
    :return : Dice score in ``[0, 1]``.
    :raises ValueError: If the two inputs have different shapes.
    """
    # `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
    # supported spelling for a boolean cast.
    image1 = np.asarray(image1).astype(bool)
    image2 = np.asarray(image2).astype(bool)
    if image1.shape != image2.shape:
        raise ValueError("Shape Mismatch: image1 and image2 must have the same shape")
    union = image1.sum() + image2.sum()
    if union == 0:
        return empty_score
    intersection = np.logical_and(image1, image2)
    return 2. * intersection.sum() / union
| 31.153846 | 105 | 0.635802 |
6fa38cf1a1fc3cf983b70ab26586f733f0d30b81 | 1,938 | py | Python | cyborg/common/nova_client.py | ChameleonCloud/cyborg | f3ab2f37dd973aad30b7364c0dea5b610309f250 | [
"Apache-2.0"
] | null | null | null | cyborg/common/nova_client.py | ChameleonCloud/cyborg | f3ab2f37dd973aad30b7364c0dea5b610309f250 | [
"Apache-2.0"
] | null | null | null | cyborg/common/nova_client.py | ChameleonCloud/cyborg | f3ab2f37dd973aad30b7364c0dea5b610309f250 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cyborg.common import utils
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class NovaAPI(object):
    """Thin client for notifying Nova about accelerator binding events."""

    def __init__(self):
        self.nova_client = utils.get_sdk_adapter('compute')

    def _get_acc_changed_event(self, instance_uuid, dev_profile_name, status):
        """Build the external-event payload for an accelerator binding change."""
        return [{'name': 'accelerator-requests-bound',
                 'server_uuid': instance_uuid,
                 'tag': dev_profile_name,
                 'status': status}
                ]

    def _send_events(self, events):
        """POST the events to Nova's os-server-external-events API.

        :returns: True on success.
        :raises Exception: if Nova responds with a non-2xx status.
        """
        url = "/os-server-external-events"
        body = {"events": events}
        response = self.nova_client.post(url, json=body)
        # Guard clause: fail fast on a non-2xx response. The original code
        # had an unreachable `return False` after the raise.
        if not response.ok:
            raise Exception(
                "Failed to send events %s: HTTP %d: %s" %
                (events, response.status_code, response.text))
        LOG.info("Successfully sent events to Nova, events: %(events)s",
                 {"events": events})
        return True

    def notify_binding(self, instance_uuid, dev_profile_name, status):
        """Notify Nova that an accelerator-request binding changed state."""
        events = self._get_acc_changed_event(instance_uuid, dev_profile_name,
                                             status)
        result = self._send_events(events)
        if not result:
            LOG.error("Failed to notify Nova service.")
        return result
| 36.566038 | 78 | 0.635707 |
f20a94f618790315b2c17ab64aa3f7324f6bdf13 | 16,122 | py | Python | typhon/retrieval/qrnn/models/pytorch/common.py | simonpf/typhon | 2a353f5adff0ff9470dffaded4e4d367e85340d2 | [
"MIT"
] | 1 | 2020-12-18T17:19:16.000Z | 2020-12-18T17:19:16.000Z | typhon/retrieval/qrnn/models/pytorch/common.py | simonpf/typhon | 2a353f5adff0ff9470dffaded4e4d367e85340d2 | [
"MIT"
] | null | null | null | typhon/retrieval/qrnn/models/pytorch/common.py | simonpf/typhon | 2a353f5adff0ff9470dffaded4e4d367e85340d2 | [
"MIT"
] | null | null | null | """
models.pytorch.common
=====================
This module provides common functionality required to realize QRNNs in pytorch.
"""
import torch
import numpy as np
from torch import nn
from torch import optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import Dataset
from tqdm import tqdm
activations = {
"elu": nn.ELU,
"hardshrink": nn.Hardshrink,
"hardtanh": nn.Hardtanh,
"prelu": nn.PReLU,
"relu": nn.ReLU,
"selu": nn.SELU,
"celu": nn.CELU,
"sigmoid": nn.Sigmoid,
"softplus": nn.Softplus,
"softmin": nn.Softmin,
}
def save_model(f, model):
    """
    Save pytorch model.

    Serializes the full model object (architecture and weights) with
    ``torch.save``.

    Args:
        f(:code:`str` or binary stream): Either a path or a binary stream
            to store the data to.
        model(:code:`pytorch.nn.Module`): The pytorch model to save
    """
    torch.save(model, f)
def load_model(f, quantiles):
    """
    Load pytorch model.

    Args:
        f(:code:`str` or binary stream): Either a path or a binary stream
            to read the model from
        quantiles(:code:`np.ndarray`): Array containing the quantiles
            that the model predicts. Accepted for interface compatibility;
            not used by this loader.

    Returns:
        The loaded pytorch model.
    """
    return torch.load(f)
def handle_input(data, device=None):
    """
    Handle input data.

    This function handles data supplied

    - as a tuple ``(x, y)`` of :code:`np.ndarray`,
    - as a single :code:`np.ndarray`,
    - as anything else (e.g. a torch :code:`DataLoader`), which is
      returned unchanged.

    Numpy arrays are converted to float tensors (and moved to ``device``
    when one is given) so they can be fed into a pytorch model.

    Args:
        data: The input data.
        device: Optional torch device to move the created tensors to.

    Returns:
        The converted tensor(s), or ``data`` itself when it is neither a
        tuple nor a numpy array.
    """
    # isinstance() also accepts tuple/ndarray subclasses (e.g. namedtuples),
    # generalizing the original exact type checks.
    if isinstance(data, tuple):
        x, y = data
        x = torch.tensor(x, dtype=torch.float)
        y = torch.tensor(y, dtype=torch.float)
        # `is not None` is the idiomatic (and precedence-safe) null check.
        if device is not None:
            x = x.to(device)
            y = y.to(device)
        return x, y
    if isinstance(data, np.ndarray):
        x = torch.tensor(data, dtype=torch.float)
        if device is not None:
            x = x.to(device)
        return x
    return data
class BatchedDataset(Dataset):
    """
    Wraps an un-batched dataset and serves it in fixed-size batches.

    Incomplete trailing batches are dropped.
    """
    def __init__(self, training_data, batch_size):
        features, targets = training_data
        self.x = torch.tensor(features, dtype=torch.float)
        self.y = torch.tensor(targets, dtype=torch.float)
        self.batch_size = batch_size
    def __len__(self):
        # Number of full batches. Required because self.x / self.y are
        # tensors, which would not raise IndexError on their own; see the
        # explicit bounds check in __getitem__.
        return self.x.shape[0] // self.batch_size
    def __getitem__(self, i):
        if i >= len(self):
            raise IndexError()
        start = i * self.batch_size
        stop = start + self.batch_size
        return (self.x[start:stop], self.y[start:stop])
################################################################################
# Quantile loss
################################################################################
class QuantileLoss:
    r"""
    The quantile (pinball) loss function

    This function object implements the quantile loss defined as

    .. math::
        \mathcal{L}(y_\text{pred}, y_\text{true}) =
        \begin{cases}
        \tau \cdot |y_\text{pred} - y_\text{true}| & , y_\text{pred} < y_\text{true} \\
        (1 - \tau) \cdot |y_\text{pred} - y_\text{true}| & , \text{otherwise}
        \end{cases}

    as a training criterion for neural networks. The criterion expects a
    vector of predicted quantiles and the observed value; the per-sample
    loss sums the losses of all quantiles, and the batch loss is the mean
    over all samples.
    """
    def __init__(self, quantiles, mask=None):
        """
        Create an instance of the quantile loss function with the given quantiles.

        Arguments:
            quantiles: Array or iterable containing the quantiles to be estimated.
            mask: Optional sentinel value; targets equal to it contribute
                zero loss.
        """
        self.quantiles = torch.tensor(quantiles).float()
        self.n_quantiles = len(quantiles)
        self.mask = mask
        if self.mask:
            self.mask = np.float32(mask)
    def to(self, device):
        self.quantiles = self.quantiles.to(device)
    def __call__(self, y_pred, y_true):
        """
        Compute the mean quantile loss for given inputs.

        Arguments:
            y_pred: N-tensor containing the predicted quantiles along the last
                 dimension
            y_true: (N-1)-tensor containing the true y values corresponding to
                the predictions in y_pred

        Returns:
            The mean quantile loss.
        """
        residual = y_pred - y_true
        n = self.quantiles.size()[0]
        # Broadcast the quantile vector against the prediction tensor:
        # trailing singleton dims align the quantile axis for >2-D inputs.
        trailing = max(residual.dim() - 2, 0)
        taus = self.quantiles.reshape((n,) + (1,) * trailing)
        loss = torch.where(residual >= 0.0, (1.0 - taus) * residual, (-taus) * residual)
        if self.mask:
            loss = torch.where(y_true == self.mask, torch.zeros_like(loss), loss)
        return loss.mean()
################################################################################
# QRNN
################################################################################
class PytorchModel:
    """
    Quantile regression neural network (QRNN)

    This class implements QRNNs as a fully-connected network with
    a given number of layers. It is designed to be mixed into a
    ``torch.nn`` module subclass: methods here call ``self(...)``,
    ``self.apply``, ``self.state_dict`` etc., which this base class does
    not itself define.
    """
    def __init__(self, input_dimension, quantiles):
        """
        Arguments:
            input_dimension(int): The number of input features.
            quantiles(array): Array of the quantiles to predict.
        """
        self.input_dimension = input_dimension
        self.quantiles = np.array(quantiles)
        self.criterion = QuantileLoss(self.quantiles)
        self.training_errors = []
        self.validation_errors = []
        self.backend = "typhon.retrieval.qrnn.models.pytorch"
    def _make_adversarial_samples(self, x, y, eps):
        # Fast-gradient-sign perturbation of the inputs, scaled by eps.
        self.zero_grad()
        x.requires_grad = True
        y_pred = self(x)
        c = self.criterion(y_pred, y)
        c.backward()
        x_adv = x.detach() + eps * torch.sign(x.grad.detach())
        return x_adv
    def reset(self):
        """
        Reinitializes the weights of a model.
        """
        def reset_function(module):
            # NOTE(review): 'm' is undefined here -- the parameter is named
            # 'module', so calling reset() raises NameError. Confirm and fix.
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                m.reset_parameters()
        self.apply(reset_function)
    def train(self, *args, **kwargs):
        """
        Train the network.

        This trains the network for the given number of epochs using the
        provided training and validation data.

        If desired, the training can be augmented using adversarial training.
        In this case the network is additionally trained with an adversarial
        batch of examples in each step of the training.

        Arguments:
            training_data: pytorch dataloader providing the training data
            validation_data: pytorch dataloader providing the validation data
            n_epochs: the number of epochs to train the network for
            adversarial_training: whether or not to use adversarial training
            eps_adv: The scaling factor to use for adversarial training.
        """
        # Handle overload of train() method: with no args (or a single bool)
        # this behaves like nn.Module.train() and only toggles train mode.
        if len(args) < 1 or (len(args) == 1 and type(args[0]) == bool):
            return nn.Sequential.train(self, *args, **kwargs)
        #
        # Parse training arguments
        #
        training_data = args[0]
        # Defaults for all supported keyword arguments; positional args after
        # training_data are matched to these keys in insertion order.
        arguments = {
            "validation_data": None,
            "batch_size": 256,
            "sigma_noise": None,
            "adversarial_training": False,
            "delta_at": 0.01,
            "initial_learning_rate": 1e-2,
            "momentum": 0.0,
            "convergence_epochs": 5,
            "learning_rate_decay": 2.0,
            "learning_rate_minimum": 1e-6,
            "maximum_epochs": 1,
            "training_split": 0.9,
            "gpu": False,
            "optimizer": None,
            "learning_rate_scheduler": None
        }
        argument_names = arguments.keys()
        for a, n in zip(args[1:], argument_names):
            arguments[n] = a
        for k in kwargs:
            if k in arguments:
                arguments[k] = kwargs[k]
            else:
                # NOTE(review): '"...".print(k)' is not a str method -- this
                # raises AttributeError instead of the intended ValueError;
                # presumably '.format(k)' was meant.
                raise ValueError("Unknown argument to {}.".print(k))
        validation_data = arguments["validation_data"]
        batch_size = arguments["batch_size"]
        sigma_noise = arguments["sigma_noise"]
        adversarial_training = arguments["adversarial_training"]
        delta_at = arguments["delta_at"]
        initial_learning_rate = arguments["initial_learning_rate"]
        convergence_epochs = arguments["convergence_epochs"]
        learning_rate_decay = arguments["learning_rate_decay"]
        learning_rate_minimum = arguments["learning_rate_minimum"]
        maximum_epochs = arguments["maximum_epochs"]
        training_split = arguments["training_split"]
        gpu = arguments["gpu"]
        momentum = arguments["momentum"]
        optimizer = arguments["optimizer"]
        learning_rate_scheduler = arguments["learning_rate_scheduler"]
        #
        # Determine device to use
        #
        if torch.cuda.is_available() and gpu:
            device = torch.device("cuda")
        else:
            device = torch.device("cpu")
        self.to(device)
        #
        # Handle input data: numpy inputs are converted to tensors and
        # batched; dataloader inputs fall through via the bare except.
        #
        try:
            x, y = handle_input(training_data, device)
            training_data = BatchedDataset((x, y), batch_size)
        # NOTE(review): bare except silently swallows all errors (including
        # KeyboardInterrupt); narrowing to the expected exception types
        # would be safer.
        except:
            pass
        # Re-enters this method with no args -> nn.Sequential.train(self),
        # i.e. switches the module to training mode.
        self.train()
        if not optimizer:
            self.optimizer = optim.SGD(
                self.parameters(), lr=initial_learning_rate, momentum=momentum
            )
        else:
            self.optimizer = optimizer
        self.criterion.to(device)
        if not optimizer and not learning_rate_scheduler:
            scheduler = ReduceLROnPlateau(
                self.optimizer,
                factor=1.0 / learning_rate_decay,
                patience=convergence_epochs,
                min_lr=learning_rate_minimum,
            )
        else:
            scheduler = learning_rate_scheduler
        training_errors = []
        validation_errors = []
        #
        # Training loop
        #
        for i in range(maximum_epochs):
            err = 0.0
            n = 0
            for j, (x, y) in enumerate(training_data):
                x = x.to(device)
                y = y.to(device)
                # Insert a singleton quantile axis so targets broadcast
                # against the (batch, n_quantiles, ...) predictions.
                shape = x.size()
                shape = (shape[0], 1) + shape[2:]
                y = y.reshape(shape)
                self.optimizer.zero_grad()
                y_pred = self(x)
                c = self.criterion(y_pred, y)
                c.backward()
                self.optimizer.step()
                err += c.item() * x.size()[0]
                n += x.size()[0]
                if adversarial_training:
                    self.optimizer.zero_grad()
                    x_adv = self._make_adversarial_samples(x, y, delta_at)
                    y_pred = self(x)
                    c = self.criterion(y_pred, y)
                    c.backward()
                    self.optimizer.step()
                # NOTE(review): 'if j % 100:' is truthy for every j NOT
                # divisible by 100 -- this prints on 99 of every 100
                # batches; 'if j % 100 == 0:' was presumably intended.
                if j % 100:
                    print(
                        "Epoch {} / {}: Batch {} / {}, Training error: {:.3f}".format(
                            i, maximum_epochs, j, len(training_data), err / n
                        ),
                        end="\r",
                    )
            # Save training error
            training_errors.append(err / n)
            lr = [group["lr"] for group in self.optimizer.param_groups][0]
            val_err = 0.0
            if not validation_data is None:
                n = 0
                for x, y in validation_data:
                    x = x.to(device).detach()
                    y = y.to(device).detach()
                    shape = x.size()
                    shape = (shape[0], 1) + shape[2:]
                    y = y.reshape(shape)
                    y_pred = self(x)
                    c = self.criterion(y_pred, y)
                    val_err += c.item() * x.size()[0]
                    n += x.size()[0]
                validation_errors.append(val_err / n)
                print(
                    "Epoch {} / {}: Training error: {:.3f}, Validation error: {:.3f}, Learning rate: {:.5f}".format(
                        i,
                        maximum_epochs,
                        training_errors[-1],
                        validation_errors[-1],
                        lr,
                    )
                )
                if scheduler:
                    # NOTE(review): ReduceLROnPlateau.step() requires the
                    # monitored metric, e.g. scheduler.step(val_err / n);
                    # calling it without arguments raises TypeError -- confirm.
                    scheduler.step()
            else:
                # NOTE(review): this branch calls scheduler.step() without
                # the 'if scheduler:' guard used above, so a None scheduler
                # (optimizer given, no scheduler) raises AttributeError here.
                scheduler.step()
                print(
                    "Epoch {} / {}: Training error: {:.3f}, Learning rate: {:.5f}".format(
                        i, maximum_epochs, training_errors[-1], lr
                    )
                )
        self.training_errors += training_errors
        self.validation_errors += validation_errors
        self.eval()
        return {
            "training_errors": self.training_errors,
            "validation_errors": self.validation_errors,
        }
    def predict(self, x, gpu=False):
        """Run the network on ``x`` and return the result as a numpy array."""
        if torch.cuda.is_available() and gpu:
            device = torch.device("cuda")
        else:
            device = torch.device("cpu")
        x = handle_input(x, device)
        self.to(device)
        # NOTE(review): .numpy() fails for tensors on a CUDA device; a
        # .cpu() before .numpy() may be needed when gpu=True -- confirm.
        return self(x.detach()).detach().numpy()
    def calibration(self, data, gpu=False):
        """
        Computes the calibration of the predictions from the neural network.

        Arguments:
            data: torch dataloader object providing the data for which to compute
                the calibration.

        Returns:
            (intervals, frequencies): Tuple containing the confidence intervals and
                corresponding observed frequencies.
        """
        if gpu and torch.cuda.is_available():
            dev = torch.device("cuda")
        else:
            dev = torch.device("cpu")
        self.to(dev)
        # Pair the k-th lowest quantile with the k-th highest to form
        # symmetric confidence intervals.
        n_intervals = self.quantiles.size // 2
        qs = self.quantiles
        intervals = np.array([q_r - q_l for (q_l, q_r) in zip(qs, reversed(qs))])[
            :n_intervals
        ]
        counts = np.zeros(n_intervals)
        total = 0.0
        iterator = tqdm(data)
        for x, y in iterator:
            x = x.to(dev).detach()
            y = y.to(dev).detach()
            shape = x.size()
            shape = (shape[0], 1) + shape[2:]
            y = y.reshape(shape)
            y_pred = self(x)
            y_pred = y_pred.cpu()
            y = y.cpu()
            # Count how often the truth falls inside each predicted interval.
            for i in range(n_intervals):
                l = y_pred[:, [i]]
                r = y_pred[:, [-(i + 1)]]
                counts[i] += np.logical_and(y >= l, y < r).sum()
            total += np.prod(y.size())
        return intervals[::-1], (counts / total)[::-1]
    def save(self, path):
        """
        Save QRNN to file.

        Arguments:
            The path in which to store the QRNN.
        """
        # NOTE(review): relies on self.width / self.depth / self.activation,
        # which are not set by this base class -- presumably supplied by the
        # concrete subclass; confirm.
        torch.save(
            {
                "input_dimension": self.input_dimension,
                "quantiles": self.quantiles,
                "width": self.width,
                "depth": self.depth,
                "activation": self.activation,
                "network_state": self.state_dict(),
                "optimizer_state": self.optimizer.state_dict(),
            },
            path,
        )
    @staticmethod
    def load(self, path):
        """
        Load QRNN from file.

        Arguments:
            path: Path of the file where the QRNN was stored.
        """
        # NOTE(review): several apparent defects here: declared @staticmethod
        # yet takes 'self'; 'load_state_dict[...]' indexes the bound method
        # instead of calling it (TypeError at runtime); and the constructed
        # 'qrnn' is never returned. Confirm intended behavior before use.
        state = torch.load(path, map_location=torch.device("cpu"))
        keys = ["input_dimension", "quantiles", "depth", "width", "activation"]
        qrnn = QRNN(*[state[k] for k in keys])
        qrnn.load_state_dict["network_state"]
        qrnn.optimizer.load_state_dict["optimizer_state"]
| 31.183752 | 116 | 0.528036 |
9df5b396dcabedd02e2ef955cce766661f250ab2 | 608 | py | Python | ui-tests/jupyter_server_test_config.py | madhur-tandon/search-replace | 1ed21545577afd3a9c7a611011e0862ee1c07d03 | [
"BSD-3-Clause"
] | null | null | null | ui-tests/jupyter_server_test_config.py | madhur-tandon/search-replace | 1ed21545577afd3a9c7a611011e0862ee1c07d03 | [
"BSD-3-Clause"
] | null | null | null | ui-tests/jupyter_server_test_config.py | madhur-tandon/search-replace | 1ed21545577afd3a9c7a611011e0862ee1c07d03 | [
"BSD-3-Clause"
] | null | null | null | """Server configuration for integration tests.
!! Never use this configuration in production because it
opens the server to the world and provide access to JupyterLab
JavaScript objects through the global window variable.
"""
from tempfile import mkdtemp
# `c` is the configuration object injected by Jupyter's config loader;
# it is not defined in this file.
# Fixed port with no retries so the test runner can target a known URL.
c.ServerApp.port = 8888
c.ServerApp.port_retries = 0
c.ServerApp.open_browser = False
# Each test run gets a fresh, throwaway root directory.
c.ServerApp.root_dir = mkdtemp(prefix="galata-test-")
# No auth and no XSRF checks: acceptable only for local integration tests.
c.ServerApp.token = ""
c.ServerApp.password = ""
c.ServerApp.disable_check_xsrf = True
# Expose the JupyterLab application object on `window` for Galata.
c.LabApp.expose_app_in_browser = True
# Uncomment to set server log level to debug level
# c.ServerApp.log_level = "DEBUG"
| 30.4 | 62 | 0.786184 |
61ef90719b5d5d759de1a6b80a1ea748d8bb0911 | 3,562 | py | Python | hooks/charmhelpers/__init__.py | CanonicalBootStack/charm-neutron-openvswitch | b404c18a506c4dcb0967b85cbafb0d238bc3f773 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-09-17T02:50:45.000Z | 2021-09-17T02:50:45.000Z | hooks/charmhelpers/__init__.py | CanonicalBootStack/charm-neutron-openvswitch | b404c18a506c4dcb0967b85cbafb0d238bc3f773 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2019-07-10T15:27:36.000Z | 2019-07-10T15:28:05.000Z | hooks/charmhelpers/__init__.py | CanonicalBootStack/charm-neutron-openvswitch | b404c18a506c4dcb0967b85cbafb0d238bc3f773 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2020-03-25T20:07:06.000Z | 2021-03-25T19:54:26.000Z | # Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
from __future__ import print_function
from __future__ import absolute_import
import functools
import inspect
import subprocess
import sys
try:
import six # NOQA:F401
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
import six # NOQA:F401
try:
import yaml # NOQA:F401
except ImportError:
if sys.version_info.major == 2:
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
else:
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
import yaml # NOQA:F401
# Holds a list of mapping of mangled function names that have been deprecated
# using the @deprecate decorator below. This is so that the warning is only
# printed once for each usage of the function.
__deprecated_functions = {}
def deprecate(warning, date=None, log=None):
    """Add a deprecation warning the first time the function is used.

    The date, which is a string in semi-ISO8660 format, indicates the
    year-month that the function is officially going to be removed.

    usage:

    @deprecate('use core/fetch/add_source() instead', '2017-04')
    def contributed_add_source_thing(...):
        ...

    And it then prints to the log ONCE that the function is deprecated.

    The reason for passing the logging function (log) is so that hookenv.log
    can be used for a charm if needed.

    :param warning: String to indicate what to use instead.
    :param date: optional string, in YYYY-MM format, to indicate when the
        function will definitely (probably) be removed.
    :param log: The log function to call to log. If None, logs to stdout
    """
    def wrap(f):
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            try:
                module = inspect.getmodule(f)
                file = inspect.getsourcefile(f)
                # getsourcelines() returns (source_lines, start_lineno).
                # Build a "start..end" line span for the dedup key; the
                # previous code used lines[0] (the list of source lines),
                # which embedded the whole source text into the key.
                source_lines, start_line = inspect.getsourcelines(f)
                end_line = start_line + len(source_lines) - 1
                f_name = "{}-{}-{}..{}-{}".format(
                    module.__name__, file, start_line, end_line, f.__name__)
            except (IOError, TypeError):
                # assume it was local, so just use the name of the function
                f_name = f.__name__
            # Warn only once per decorated function for the process lifetime.
            if f_name not in __deprecated_functions:
                __deprecated_functions[f_name] = True
                s = "DEPRECATION WARNING: Function {} is being removed".format(
                    f.__name__)
                if date:
                    s = "{} on/around {}".format(s, date)
                if warning:
                    s = "{} : {}".format(s, warning)
                if log:
                    log(s)
                else:
                    print(s)
            return f(*args, **kwargs)
        return wrapped_f
    return wrap
| 36.346939 | 79 | 0.633352 |
2906bb96bd420a575de0e2c08b0206ef4d45e5c9 | 1,818 | py | Python | kitsune/announcements/tasks.py | turtleloveshoes/kitsune | 7e5524644eab7f608a44c44c63d242cda3aef7f0 | [
"BSD-3-Clause"
] | 1 | 2017-07-03T12:11:03.000Z | 2017-07-03T12:11:03.000Z | kitsune/announcements/tasks.py | rlr/kitsune | 591e996a3a115a7b235cbca19f5dec58fc9b6249 | [
"BSD-3-Clause"
] | 8 | 2020-06-05T18:42:14.000Z | 2022-03-11T23:26:51.000Z | kitsune/announcements/tasks.py | rlr/kitsune | 591e996a3a115a7b235cbca19f5dec58fc9b6249 | [
"BSD-3-Clause"
] | 1 | 2020-11-03T23:47:55.000Z | 2020-11-03T23:47:55.000Z | from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
import bleach
from celery import task
from tower import ugettext as _
from kitsune.announcements.models import Announcement
from kitsune.sumo.decorators import timeit
from kitsune.sumo.email_utils import make_mail, safe_translation, send_messages
@task()
@timeit
def send_group_email(announcement_id):
    """Build and send the announcement emails to a group."""
    try:
        announcement = Announcement.objects.get(pk=announcement_id)
    except Announcement.DoesNotExist:
        return
    group = announcement.group
    recipients = User.objects.filter(groups__in=[group])
    plain_content = bleach.clean(announcement.content_parsed,
                                 tags=[], strip=True).strip()
    email_kwargs = {'content': plain_content,
                    'content_html': announcement.content_parsed,
                    'domain': Site.objects.get_current().domain}

    @safe_translation
    def _make_mail(locale, user):
        subject = _('New announcement for {group}').format(group=group.name)
        return make_mail(subject=subject,
                         text_template='announcements/email/announcement.ltxt',
                         html_template='announcements/email/announcement.html',
                         context_vars=email_kwargs,
                         from_email=settings.TIDINGS_FROM_ADDRESS,
                         to_email=user.email)

    # Localize email each time.
    messages = [
        _make_mail(user.profile.locale or settings.LANGUAGE_CODE, user)
        for user in recipients
    ]
    send_messages(messages)
| 33.666667 | 79 | 0.665017 |
12a64ad282463fd34cc1cf8075f0941d0366a00d | 12,684 | py | Python | laygo/generators/splash/clk_dis_htree_layout_generator.py | tinapiao/Software-IC-Automation | 74b23cd94aa6e4658b110e93b5deb635e014f3a6 | [
"BSD-3-Clause"
] | 26 | 2017-07-07T08:06:31.000Z | 2021-11-25T06:41:24.000Z | laygo/generators/splash/clk_dis_htree_layout_generator.py | tinapiao/Software-IC-Automation | 74b23cd94aa6e4658b110e93b5deb635e014f3a6 | [
"BSD-3-Clause"
] | 9 | 2016-12-28T03:08:29.000Z | 2019-01-30T16:00:28.000Z | laygo/generators/splash/clk_dis_htree_layout_generator.py | tinapiao/Software-IC-Automation | 74b23cd94aa6e4658b110e93b5deb635e014f3a6 | [
"BSD-3-Clause"
] | 10 | 2018-07-14T01:31:28.000Z | 2021-08-21T10:18:30.000Z |
#!/usr/bin/python
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
"""ADC library
"""
import laygo
import numpy as np
import os
import yaml
#import logging;logging.basicConfig(level=logging.DEBUG)
def generate_clkdis_hcell(laygen, objectname_pfix, logictemp_lib, working_lib, grid, origin=np.array([0, 0]), ratio=2, trackm=4,
        metal_v1=5, metal_h=4, metal_v2=5, pitch_h=100, len_v1=10, len_h=None, len_v2=10, offset=0, in_pin=0, out_pin=0, out_label=''):
    """generate htree cell

    Draws one H-cell of a clock-distribution tree: a bundle of `trackm`
    vertical input tracks, a horizontal bar, and `ratio` bundles of vertical
    output tracks, with vias at each crossing. Returns the output-bundle
    origins so the next tree level can attach below them.

    NOTE(review): metal_v1/metal_h/metal_v2 must be adjacent layers (differ
    by exactly 1) or the function prints an error and returns None.
    """
    # Default the horizontal span to the minimum needed for `ratio` outputs.
    if len_h == None:
        len_h = pitch_h*(ratio-1)
    else:
        if len_h < pitch_h*(ratio-1):
            print("horizental length is too small, please redefine it!")
            return
    # Pick the routing grid for the v1/h layer pair (order-insensitive).
    if (metal_v1-metal_h == -1):
        rg1 = grid['rg_m'+str(metal_v1)+'m'+str(metal_h)]
    elif(metal_h-metal_v1 == -1):
        rg1 = grid['rg_m'+str(metal_h)+'m'+str(metal_v1)]
    else:
        print('Some error with metal assignment!')
        return
    #print(rg1)
    # Pick the routing grid for the h/v2 layer pair (order-insensitive).
    if (metal_h-metal_v2 == -1):
        rg2 = grid['rg_m'+str(metal_h)+'m'+str(metal_v2)]
    elif(metal_v2-metal_h == -1):
        rg2 = grid['rg_m'+str(metal_v2)+'m'+str(metal_h)]
    else:
        print('Some error with metal assignment!')
        return
    # Convert physical lengths (um) to abstract grid coordinates.
    len_v1 = laygen.grids.get_absgrid_coord_y(gridname=rg1, y=len_v1)
    len_h = laygen.grids.get_absgrid_coord_x(gridname=rg1, x=len_h)
    len_v2 = laygen.grids.get_absgrid_coord_y(gridname=rg2, y=len_v2)
    #print(rg2)
    ## vertical tracks 1 and vias
    for i in range(trackm):
        vp1x=laygen.route(None, laygen.layers['metal'][metal_v1], xy0=np.array([origin[0]+2*i, origin[1]]), xy1=np.array([origin[0]+2*i, origin[1]-len_v1-2*trackm+2]), gridname0=rg1)
        if in_pin==1:
            laygen.boundary_pin_from_rect(vp1x, gridname=rg1, name='WI_' + str(i),
                                          layer=laygen.layers['pin'][metal_v1], size=2, direction='top',
                                          netname='W')
        for j in range(trackm):
            laygen.via(None, xy=np.array([origin[0]+2*i,origin[1]-len_v1-2*j]), gridname=rg1)
    ## horizental tracks
    for i in range(trackm):
        laygen.route(None, laygen.layers['metal'][metal_h], xy0=np.array([origin[0]-len_h/2+offset, origin[1]-len_v1-2*i]), xy1=np.array([origin[0]+len_h/2+2*trackm-2+offset, origin[1]-len_v1-2*i]), gridname0=rg1)
    ## vertical tracks 2
    out_xy = []
    for k in range(ratio):
        for i in range(trackm):
            vp2x=laygen.route(None, laygen.layers['metal'][metal_v2], xy0=np.array([origin[0]+2*i-(ratio-1)/2*len_h+k*len_h+offset, origin[1]-len_v1]),
                    xy1=np.array([origin[0]+2*i-(ratio-1)/2*len_h+k*len_h+offset, origin[1]-len_v1-2*trackm+2-len_v2]), gridname0=rg2)
            if out_pin==1:
                laygen.boundary_pin_from_rect(vp2x, gridname=rg1,
                                              name='WO' + str(out_label) + '_' + str(k) + '_' + str(i),
                                              layer=laygen.layers['pin'][metal_v1], size=2, direction='bottom',
                                              netname='W')
                #print('WO'+str(out_label)+'_'+str(k)+'_'+str(i))
            for j in range(trackm):
                laygen.via(None, xy=np.array([origin[0]+2*i-(ratio-1)/2*len_h+k*len_h+offset,origin[1]-len_v1-2*j]), gridname=rg1)
        # Origin of this output bundle; the next level starts here.
        out_xy.append(np.array([origin[0]-(ratio-1)/2*len_h+k*len_h+offset, origin[1]-len_v1-2*trackm+2-len_v2]))
    #print(out_xy)
    return out_xy
def generate_clkdis_htree(laygen, objectname_pfix, logictemp_lib, working_lib, grid, origin=np.array([0, 0]), level=2, trackm=2, ratio=[2, 2], metal_v1=[5, 5],
        metal_h=[4, 4], metal_v2=[5, 5], metal=[5, 5], pitch_h=[20.16*2, 20.16], len_v1=[1, 1], len_h=[None, None], len_v2=[1, 1], offset=[0, 0]):
    #grid, level, ratio, metal_v1, metal_h, metal_v2, pitch_h, len_v1, len_v2, offset):
    """generate htree

    Builds a clock-distribution H-tree by instantiating one H-cell per leaf
    at every level, chaining each level's output origins into the next
    level's cell origins.

    NOTE(review): despite taking `level`, `ratio`, `metal_*`, etc. as
    parameters, the body reads the module-level `params` dict (and the
    module-level `logictemplib`/`workinglib` names) instead of its own
    arguments, so the parameters are effectively ignored; this only works
    when called from this file's __main__ block. The list/np.array default
    arguments are also mutable defaults shared across calls — confirm
    before reusing this function elsewhere.
    """
    # Validate that every per-level parameter list matches the tree depth.
    if ( len(params['metal_v1'])!=params['level'] or len(params['metal_h'])!=params['level'] or len(params['metal_v2'])!=params['level'] or
         len(params['pitch_h'])!=params['level'] or len(params['len_v1'])!=params['level'] or len(params['len_h'])!=params['level']or
         len(params['len_v2'])!=params['level'] ):
        print("\nThere's some error with your array size. Please check it!\n")
        return
    # One parameter dict per level, fed to generate_clkdis_hcell.
    cell_params=[]
    for i in range(params['level']):
        params0 = dict(
            #track number
            trackm = params['trackm'],
            #divide ratio
            ratio = params['ratio'][i],
            #metal layer
            metal_v1 = params['metal_v1'][i],
            metal_h = params['metal_h'][i],
            metal_v2 = params['metal_v2'][i],
            #pitch between output
            pitch_h = params['pitch_h'][i],
            #length
            len_v1 = params['len_v1'][i],
            len_h = params['len_h'][i],
            len_v2 = params['len_v2'][i],
            #offset
            offset = params['offset'][i],
            #in_pin
            in_pin = 0,
            #out_pin
            out_pin = 0,
            #out_label
            out_label='',
        )
        ## Changing in/out pins: top level gets input pins, bottom level
        ## gets output pins (both if the tree has a single level).
        if i==0:
            if i== params['level']-1:
                params0['in_pin'] = 1
                params0['out_pin'] = 1
            else:
                params0['in_pin'] = 1
                params0['out_pin'] = 0
        elif i== params['level']-1:
            params0['in_pin'] = 0
            params0['out_pin'] = 1
        else:
            params0['in_pin'] = 0
            params0['out_pin'] = 0
        cell_params.append(params0)
    for k in range(params['level']):
        #Calculat how many leaves at level k
        if k == 0:
            level_ratio = 1
            old_orig_xy = [np.array([0, 0]),]
        else:
            level_ratio = level_ratio*params['ratio'][k-1]
            old_orig_xy = orig_xy
        orig_xy = []
        for i in range(level_ratio):
            # Leaf cells get a numbered label so their output pins are unique.
            if k==params['level']-1:
                cell_params[k]['out_label']=str(i)
            orig_xy0 = generate_clkdis_hcell(laygen, objectname_pfix='HCELL_'+str(k)+'_'+str(i), logictemp_lib=logictemplib, working_lib=workinglib, grid=grid,
                                             origin=old_orig_xy[i], **cell_params[k])
            #print(orig_xy0)
            for orig in orig_xy0:
                orig_xy.append(orig)
if __name__ == '__main__':
laygen = laygo.GridLayoutGenerator(config_file="laygo_config.yaml")
import imp
try:
imp.find_module('bag')
laygen.use_phantom = False
except ImportError:
laygen.use_phantom = True
tech=laygen.tech
utemplib = tech+'_microtemplates_dense'
logictemplib = tech+'_logic_templates'
laygen.load_template(filename=tech+'_microtemplates_dense_templates.yaml', libname=utemplib)
laygen.load_grid(filename=tech+'_microtemplates_dense_grids.yaml', libname=utemplib)
laygen.load_template(filename=logictemplib+'.yaml', libname=logictemplib)
laygen.templates.sel_library(utemplib)
laygen.grids.sel_library(utemplib)
# library load or generation
workinglib = 'clk_dis_generated'
laygen.add_library(workinglib)
laygen.sel_library(workinglib)
if os.path.exists(workinglib + '.yaml'): # generated layout file exists
laygen.load_template(filename=workinglib + '.yaml', libname=workinglib)
laygen.templates.sel_library(utemplib)
#grid
grid = dict(
pg = 'placement_basic', #placement grid
rg_m1m2 = 'route_M1_M2_cmos',
rg_m1m2_thick = 'route_M1_M2_basic_thick',
rg_m2m3 = 'route_M2_M3_cmos',
rg_m2m3_thick = 'route_M2_M3_thick',
rg_m2m3_thick2 = 'route_M2_M3_thick2',
rg_m3m4 = 'route_M3_M4_basic',
rg_m3m4_dense = 'route_M3_M4_dense',
rg_m4m5 = 'route_M4_M5_basic',
rg_m5m6 = 'route_M5_M6_basic',
rg_m6m7 = 'route_M6_M7_basic',
rg_m1m2_pin = 'route_M1_M2_basic',
rg_m2m3_pin = 'route_M2_M3_basic',
)
#parameters
pitch_x=laygen.get_template_xy(name='clk_dis_viadel_cell', libname=workinglib)[0]
params = dict(
#stage
level = 2,
#track number
trackm = 12,
#divide ratio
ratio = [2, 2],
#metal layer
metal_v1 = [5, 5],
metal_h = [4, 4],
metal_v2 = [5, 5],
#pitch between output
#pitch_h = [100, 50], #um
pitch_h = [pitch_x*2, pitch_x], #um
#length
len_v1 = [1, 0.5], #um
len_h = [None, None], #um
len_v2 = [0.5, 1], #um
#offset
offset = [0, 0], #um
)
# Load generation parameters from the ADC spec/size preset yaml files.
load_from_file = True
yamlfile_spec = "adc_sar_spec.yaml"
yamlfile_size = "adc_sar_size.yaml"
if load_from_file == True:
    # Load parameters. yaml.safe_load is used instead of yaml.load: the
    # presets are plain mappings, and yaml.load without an explicit Loader
    # is deprecated (and an error with PyYAML >= 6).
    with open(yamlfile_spec, 'r') as stream:
        specdict = yaml.safe_load(stream)
    with open(yamlfile_size, 'r') as stream:
        sizedict = yaml.safe_load(stream)
    # Number of divider stages derived from the interleaving factor;
    # clamp to a single pass-through stage (ratio 1) for 2-way interleaving.
    lvl = int(np.log2(specdict['n_interleave'] / 2))
    ratio = 2
    if lvl == 0:
        lvl = 1
        ratio = 1
    params = dict(
        # stage
        level=lvl,
        # track number (placeholder; overridden from the sizing preset below)
        trackm=24,
        # divide ratio
        ratio=[ratio] * lvl,
        # metal layer
        metal_v1=[5] * lvl,
        metal_h=[4] * lvl,
        metal_v2=[5] * lvl,
        # pitch between outputs halves at each stage of the tree
        pitch_h=[pitch_x * int(2 ** (lvl - i - 1)) for i in range(lvl)],
        # length
        len_v1=[1] * lvl,  # um
        len_h=[None] * lvl,  # um
        len_v2=[1] * lvl,  # um
        # offset
        offset=[0] * lvl,  # um
    )
    # Track count comes from the sizing preset. The original code re-opened
    # and re-parsed both yaml files here; specdict/sizedict are already
    # loaded above, so the redundant second read was dropped.
    params['trackm'] = sizedict['clk_dis_htree']['m_track']
# Generate the htree cell and register it as a template in the working library.
print(workinglib)
mycell_list = []
cellname = 'clk_dis_htree'
print(cellname + " generating")
mycell_list.append(cellname)
laygen.add_cell(cellname)
laygen.sel_cell(cellname)
generate_clkdis_htree(laygen, objectname_pfix='HTREE', logictemp_lib=logictemplib, working_lib=workinglib, grid=grid, **params)
laygen.add_template_from_cell()
print(mycell_list)
laygen.save_template(filename=workinglib + '.yaml', libname=workinglib)
# bag export, if bag does not exist, gds export
# The deprecated `imp.find_module` probe was removed (`imp` is gone in
# Python 3.12); `import bag` raises ImportError itself when bag is absent,
# which triggers the same GDS fallback as before.
try:
    import bag
    prj = bag.BagProject()
    for mycell in mycell_list:
        laygen.sel_cell(mycell)
        laygen.export_BAG(prj, array_delimiter=['[', ']'])
except ImportError:
    laygen.export_GDS('output.gds', cellname=mycell_list, layermapfile=tech + ".layermap")  # change layermapfile
| 40.394904 | 213 | 0.585304 |
18d35d1bf0769171fe1d14d95aae2c8384d5a0be | 13,673 | py | Python | oasislmf/cli/api.py | augustoproiete-forks/OasisLMF--OasisLMF | 560749e9dd7d8bd84307cd2767517b3e1d3a1c01 | [
"BSD-3-Clause"
] | null | null | null | oasislmf/cli/api.py | augustoproiete-forks/OasisLMF--OasisLMF | 560749e9dd7d8bd84307cd2767517b3e1d3a1c01 | [
"BSD-3-Clause"
] | null | null | null | oasislmf/cli/api.py | augustoproiete-forks/OasisLMF--OasisLMF | 560749e9dd7d8bd84307cd2767517b3e1d3a1c01 | [
"BSD-3-Clause"
] | null | null | null | import getpass
import io
import json
import sys
from argparse import RawDescriptionHelpFormatter
from ..api.client import APIClient
from ..utils.exceptions import OasisException
from ..utils.path import PathCleaner
from ..utils.defaults import API_EXAMPLE_AUTH
from .base import OasisBaseCommand
from .inputs import InputValues
def load_credentials(login_arg, logger=None):
    """
    Resolve API credentials from one of three sources.

    Options:
        1. '--api-server-login ./APIcredentials.json' - a path to a JSON
           file containing {"username": ..., "password": ...}
        2. Credentials loaded from default config file '-C oasislmf.json'
           (passed in as a dict)
        3. Interactive prompt for username / password

    :param login_arg: str path to a JSON credentials file, a dict with
        'username' and 'password' keys, or None.
    :param logger: optional logger for user-facing messages.
    :return: dict with 'username' and 'password' keys.
    """
    if isinstance(login_arg, str):
        # Path to a JSON credentials file
        with io.open(login_arg, encoding='utf-8') as f:
            return json.load(f)
    if isinstance(login_arg, dict) and {'password', 'username'} <= set(login_arg):
        # Inline credentials (e.g. from a config file section)
        return login_arg
    # Fall through to an interactive prompt. BUGFIX: this now also covers a
    # dict that is missing 'username'/'password', which previously returned
    # None silently and crashed later in the caller.
    if logger:
        logger.info('API Login:')
    try:
        api_login = {}
        api_login['username'] = input('Username: ')
        api_login['password'] = getpass.getpass('Password: ')
        return api_login
    except KeyboardInterrupt as e:
        if logger:
            logger.error('\nFailed to get API login details:')
            logger.error(e)
        sys.exit(1)
def open_api_connection(input_args, logger):
    """
    Create an authenticated APIClient session.

    The bundled reference example credentials are tried first; if the
    server rejects them, real credentials are loaded/prompted via
    ``load_credentials`` and the connection is attempted once more.
    Exits the process on a second authentication failure.
    """
    server_url = input_args.get('api_server_url')
    try:
        # First attempt: the reference example login
        return APIClient(
            api_url=server_url,
            api_ver='V1',
            username=API_EXAMPLE_AUTH['user'],
            password=API_EXAMPLE_AUTH['pass'],
            logger=logger,
        )
    except OasisException:
        # Example auth rejected - resolve real credentials and retry once
        try:
            credentials = load_credentials(input_args.get('api_server_login'), logger=logger)
            logger.info('Connecting to - {}'.format(server_url))
            return APIClient(
                api_url=server_url,
                api_ver='V1',
                username=credentials['username'],
                password=credentials['password'],
                logger=logger,
            )
        except OasisException as e:
            logger.error('API Connection error:')
            logger.error(e)
            sys.exit(1)
class GetApiCmd(OasisBaseCommand):
    """
    Issue API GET requests via the command line
    """
    formatter_class = RawDescriptionHelpFormatter

    def add_args(self, parser):
        super(self.__class__, self).add_args(parser)
        parser.add_argument(
            '-u', '--api-server-url', type=str,
            default="http://localhost:8000",
            help='Oasis API server URL (including protocol and port), e.g. http://localhost:8000',
        )
        parser.add_argument(
            '-l', '--api-server-login', type=PathCleaner('credentials file', preexists=False), default=None,
            help='Json file with {"username":"<USER>", "password":"<PASS>"}', required=False,
        )
        # Optional boolean flags selecting which resource listings to fetch
        for short_flag, long_flag, help_text in (
            ('-m', '--models', 'Fetch the list of stored models'),
            ('-p', '--portfolios', 'Fetch the list of stored portfolios'),
            ('-a', '--analyses', 'Fetch the list of stored analyses'),
        ):
            parser.add_argument(
                short_flag, long_flag, type=bool, const=True, default=False,
                nargs='?', required=False, help=help_text,
            )

    def action(self, args):
        """Fetch and pretty-print the selected API resource listings."""
        inputs = InputValues(args)
        api = open_api_connection(inputs, self.logger)
        endpoints = []
        if args.models:
            endpoints.append(api.models)
        if args.portfolios:
            endpoints.append(api.portfolios)
        if args.analyses:
            endpoints.append(api.analyses)
        for endpoint in endpoints:
            resp = endpoint.get()
            self.logger.info(json.dumps(resp.json(), indent=4, sort_keys=True))
class DelApiCmd(OasisBaseCommand):
    """Delete stored API resources (models / portfolios / analyses) by id."""
    formatter_class = RawDescriptionHelpFormatter

    def add_args(self, parser):
        super(self.__class__, self).add_args(parser)
        parser.add_argument(
            '-u', '--api-server-url', type=str,
            default="http://localhost:8000",
            help='Oasis API server URL (including protocol and port), e.g. http://localhost:8000',
        )
        parser.add_argument(
            '-l', '--api-server-login', type=PathCleaner('credentials file', preexists=False), default=None,
            help='Json file with {"username":"<USER>", "password":"<PASS>"}', required=False,
        )
        parser.add_argument(
            '-y', '--api-no-confirm', type=bool, default=False, const=True, nargs='?', required=False,
            help='Skip confirmation prompt before altering the API resources',
        )
        parser.add_argument(
            '-m', '--model-id', type=int, default=None, required=False,
            help='Model ID to delete',
        )
        parser.add_argument(
            '-p', '--portfolio-id', type=int, default=None, required=False,
            help='Portfolio ID to delete',
        )
        parser.add_argument(
            '-a', '--analysis-id', type=int, default=None, required=False,
            help='Analysis ID to delete',
        )

    def _delete_record(self, inputs, endpoint, id_ref, skip_confirm):
        """Show the record, ask for confirmation, then delete it."""
        r = endpoint.get(id_ref)
        r.raise_for_status()
        self.logger.info(json.dumps(r.json(), indent=4, sort_keys=True))
        if inputs.confirm_action('Delete this record from the API?', skip_confirm):
            r = endpoint.delete(id_ref)
            r.raise_for_status()
            self.logger.info('Record deleted')

    def action(self, args):
        inputs = InputValues(args)
        api = open_api_connection(inputs, self.logger)
        # Collect the requested (endpoint, id) pairs; an error on one record
        # no longer aborts the remaining deletions.
        targets = []
        if args.model_id:
            targets.append((api.models, inputs.get('model_id')))
        if args.portfolio_id:
            targets.append((api.portfolios, inputs.get('portfolio_id')))
        if args.analysis_id:
            targets.append((api.analyses, inputs.get('analysis_id')))
        for endpoint, id_ref in targets:
            try:
                self._delete_record(inputs, endpoint, id_ref, args.api_no_confirm)
            except Exception as e:
                # BUGFIX: the original handler referenced a possibly unbound
                # local `r`, raising NameError and masking the real error.
                self.logger.error(e)
                self.logger.error("Error on delete ref({}):".format(id_ref))
                response = getattr(e, 'response', None)
                if response is not None:
                    self.logger.error(response.text)
class PutApiModelCmd(OasisBaseCommand):
    """Register a model (supplier/model/version) with the API."""
    formatter_class = RawDescriptionHelpFormatter

    def add_args(self, parser):
        super(self.__class__, self).add_args(parser)
        parser.add_argument(
            '-u', '--api-server-url', type=str,
            default="http://localhost:8000",
            help='Oasis API server URL (including protocol and port), e.g. http://localhost:8000',
        )
        parser.add_argument(
            '-l', '--api-server-login',
            type=PathCleaner('credentials file', preexists=False), default=None,
            help='Json file with {"username":"<USER>", "password":"<PASS>"}', required=False,
        )
        # Required model identifiers
        for flag, help_text in (
            ('--supplier-id', 'The supplier ID for the model.'),
            ('--model-id', 'The model ID for the model.'),
            ('--version-id', 'The version ID for the model.'),
        ):
            parser.add_argument(flag, type=str, default=None, required=True, help=help_text)

    def action(self, args):
        """Create the model record on the connected API server."""
        inputs = InputValues(args)
        api = open_api_connection(inputs, self.logger)
        supplier_id = inputs.get('supplier_id')
        model_id = inputs.get('model_id')
        version_id = inputs.get('version_id')
        api.models.create(
            supplier_id=supplier_id,
            model_id=model_id,
            version_id=version_id,
        )
class RunApiCmd(OasisBaseCommand):
    """Upload OED exposure data, create and run an analysis, then download outputs."""
    formatter_class = RawDescriptionHelpFormatter

    def add_args(self, parser):
        super(self.__class__, self).add_args(parser)
        # API Connection
        parser.add_argument(
            '-u', '--api-server-url', type=str,
            default="http://localhost:8000",
            help='Oasis API server URL (including protocol and port), e.g. http://localhost:8000',
        )
        parser.add_argument(
            '-l', '--api-server-login',
            type=PathCleaner('credentials file', preexists=False), default=None,
            help='Json file with {"username":"<USER>", "password":"<PASS>"}', required=False,
        )
        # Required
        parser.add_argument(
            '-m', '--model-id', type=int, default=None,
            required=False,
            help='API `id` of a model to run the analysis with'
        )
        parser.add_argument(
            '-a', '--analysis-settings-json',
            type=PathCleaner('analysis settings file'), default=None,
            help='Analysis settings JSON file path'
        )
        parser.add_argument('-x', '--oed-location-csv', type=PathCleaner('OED location file'), default=None, help='Source exposure CSV file path')
        parser.add_argument('-y', '--oed-accounts-csv', type=PathCleaner('OED accounts file'), default=None, help='Source accounts CSV file path')
        parser.add_argument('-i', '--oed-info-csv', type=PathCleaner('OED Reinsurances info file'), default=None, help='Reinsurance info. CSV file path')
        parser.add_argument('-s', '--oed-scope-csv', type=PathCleaner('OED Reinsurances scope file'), default=None, help='Reinsurance scope CSV file path')
        parser.add_argument('-o', '--output-dir', type=PathCleaner('Output directory', preexists=False), default='./', help="Output data directory (absolute or relative file path)")

    def _select_model(self, available_models):
        """
        Interactively prompt the user to pick one of ``available_models``
        (a list of model dicts from the API) and return the chosen dict.
        """
        # list options
        for i, model in enumerate(available_models):
            self.logger.info('{} \t {}-{}-{}'.format(
                i,
                model['supplier_id'],
                model['model_id'],
                model['version_id'],
            ))
        # Fetch user choice
        while True:
            raw_choice = None
            try:
                raw_choice = input('Select model: ')
                value = int(raw_choice)
            except ValueError:
                # BUGFIX: the original logged `value` here, which is unbound
                # when int() fails on the first iteration (UnboundLocalError).
                self.logger.info('Invalid Response: {}'.format(raw_choice))
                continue
            except KeyboardInterrupt:
                sys.exit(1)
            if (value < 0) or (value >= len(available_models)):
                self.logger.info('Invalid Response: {}'.format(value))
                continue
            return available_models[value]

    def action(self, args):
        """Upload inputs, resolve the model, create the analysis, run it and download results."""
        inputs = InputValues(args)
        api = open_api_connection(inputs, self.logger)
        # Upload files
        path_location = inputs.get('oed_location_csv')
        path_account = inputs.get('oed_accounts_csv')
        path_info = inputs.get('oed_info_csv')
        path_scope = inputs.get('oed_scope_csv')
        portfolio = api.upload_inputs(
            portfolio_id=None,
            location_fp=path_location,
            accounts_fp=path_account,
            ri_info_fp=path_info,
            ri_scope_fp=path_scope,
        )
        # Resolve the model: explicit id, a single available model, or an
        # interactive selection when several are available.
        model_id = inputs.get('model_id')
        if not model_id:
            available_models = api.models.get().json()
            if len(available_models) > 1:
                selected_model = self._select_model(available_models)
            elif len(available_models) == 1:
                selected_model = available_models[0]
            else:
                raise OasisException(
                    'No models found in API: {}'.format(inputs.get('api_server_url'))
                )
            model_id = selected_model['id']
            self.logger.info('Running model:')
            self.logger.info(json.dumps(selected_model, indent=4))
        # Create new analysis
        path_settings = inputs.get('analysis_settings_json')
        if not path_settings:
            self.logger.error('analysis settings: Not found')
            return False
        analysis = api.create_analysis(
            portfolio_id=portfolio['id'],
            model_id=model_id,
            analysis_settings_fp=path_settings,
        )
        self.logger.info('Loaded analysis settings:')
        self.logger.info(json.dumps(analysis, indent=4))
        # run and poll
        api.run_generate(analysis['id'], poll_interval=3)
        api.run_analysis(analysis['id'], poll_interval=3)
        # Download Outputs
        api.download_output(
            analysis_id=analysis['id'],
            download_path=inputs.get('output_dir'),
            overwrite=True,
            clean_up=False
        )
class ApiCmd(OasisBaseCommand):
    """Parent command grouping the API sub-commands: list, add-model, delete and run."""
    sub_commands = {
        'list': GetApiCmd,
        'add-model': PutApiModelCmd,
        'delete': DelApiCmd,
        'run': RunApiCmd
    }
| 37.357923 | 181 | 0.580999 |
a1681fcb135caed5a8d6681297e0adf162a4c88f | 156,321 | py | Python | lib/galaxy/tools/__init__.py | alex-k8s/galaxy | d37bda41ebbde79c1c0a77d8490505bb940de38d | [
"CC-BY-3.0"
] | 1 | 2021-12-14T15:19:25.000Z | 2021-12-14T15:19:25.000Z | lib/galaxy/tools/__init__.py | Stark-F/galaxy | 7f0519ff20ac90736ad86d06ff7bae17e8456c36 | [
"CC-BY-3.0"
] | 11 | 2021-06-08T00:26:51.000Z | 2021-06-22T16:12:03.000Z | lib/galaxy/tools/__init__.py | alex-k8s/galaxy | d37bda41ebbde79c1c0a77d8490505bb940de38d | [
"CC-BY-3.0"
] | null | null | null | """
Classes encapsulating galaxy tools and tool configuration.
"""
import itertools
import json
import logging
import os
import re
import tarfile
import tempfile
import threading
from datetime import datetime
from pathlib import Path
from typing import cast, Dict, List, NamedTuple, Optional, Tuple, Type, Union
from urllib.parse import unquote_plus
import packaging.version
import webob.exc
from lxml import etree
from mako.template import Template
from pkg_resources import resource_string
from webob.compat import cgi_FieldStorage
from galaxy import (
exceptions,
model
)
from galaxy.exceptions import ToolInputsNotReadyException
from galaxy.job_execution import output_collect
from galaxy.metadata import get_metadata_compute_strategy
from galaxy.tool_shed.util.repository_util import get_installed_repository
from galaxy.tool_shed.util.shed_util_common import set_image_paths
from galaxy.tool_util.deps import (
CachedDependencyManager,
)
from galaxy.tool_util.fetcher import ToolLocationFetcher
from galaxy.tool_util.loader import (
imported_macro_paths,
raw_tool_xml_tree,
template_macro_params
)
from galaxy.tool_util.output_checker import DETECTED_JOB_STATE
from galaxy.tool_util.parser import (
get_tool_source,
get_tool_source_from_representation,
RequiredFiles,
ToolOutputCollectionPart
)
from galaxy.tool_util.parser.xml import XmlPageSource
from galaxy.tool_util.provided_metadata import parse_tool_provided_metadata
from galaxy.tool_util.toolbox import BaseGalaxyToolBox
from galaxy.tools import expressions
from galaxy.tools.actions import DefaultToolAction
from galaxy.tools.actions.data_manager import DataManagerToolAction
from galaxy.tools.actions.data_source import DataSourceToolAction
from galaxy.tools.actions.model_operations import ModelOperationToolAction
from galaxy.tools.cache import ToolDocumentCache
from galaxy.tools.imp_exp import JobImportHistoryArchiveWrapper
from galaxy.tools.parameters import (
check_param,
params_from_strings,
params_to_incoming,
params_to_strings,
populate_state,
visit_input_values
)
from galaxy.tools.parameters.basic import (
BaseURLToolParameter,
DataCollectionToolParameter,
DataToolParameter,
HiddenToolParameter,
ImplicitConversionRequired,
SelectToolParameter,
ToolParameter,
workflow_building_modes,
)
from galaxy.tools.parameters.dataset_matcher import (
set_dataset_matcher_factory,
unset_dataset_matcher_factory,
)
from galaxy.tools.parameters.grouping import Conditional, ConditionalWhen, Repeat, Section, UploadDataset
from galaxy.tools.parameters.input_translation import ToolInputTranslator
from galaxy.tools.parameters.meta import expand_meta_parameters
from galaxy.tools.parameters.wrapped_json import json_wrap
from galaxy.tools.test import parse_tests
from galaxy.util import (
in_directory,
listify,
Params,
parse_xml_string,
rst_to_html,
string_as_bool,
unicodify,
XML,
)
from galaxy.util.bunch import Bunch
from galaxy.util.dictifiable import Dictifiable
from galaxy.util.expressions import ExpressionContext
from galaxy.util.form_builder import SelectField
from galaxy.util.json import safe_loads
from galaxy.util.rules_dsl import RuleSet
from galaxy.util.template import (
fill_template,
refactoring_tool,
)
from galaxy.util.tool_shed.common_util import (
get_tool_shed_repository_url,
get_tool_shed_url_from_tool_shed_registry,
)
from galaxy.version import VERSION_MAJOR
from galaxy.work.context import proxy_work_context_for_history
from .execute import (
execute as execute_job,
MappingParameters,
)
log = logging.getLogger(__name__)

# %-format template used when a tool declares a nodejs requirement but no
# node/nodejs binary can be located.
# NOTE(review): "adminstrator" typo is in the user-facing string; fixing it
# would change runtime output, so it is left as-is here.
REQUIRES_JS_RUNTIME_MESSAGE = ("The tool [%s] requires a nodejs runtime to execute "
                               "but node or nodejs could not be found. Please contact the Galaxy adminstrator")

# Lock object; presumably used as a sentinel for not-yet-built tool help —
# TODO confirm usage elsewhere in this module.
HELP_UNINITIALIZED = threading.Lock()
MODEL_TOOLS_PATH = os.path.abspath(os.path.dirname(__file__))
# Tools that require Galaxy's Python environment to be preserved.
GALAXY_LIB_TOOLS_UNVERSIONED = [
    "upload1",
    "send_to_cloud",
    "__DATA_FETCH__",
    "directory_uri",
    "export_remote",
    # Legacy tools bundled with Galaxy.
    "laj_1",
    "gff2bed1",
    "gff_filter_by_feature_count",
    "Interval_Maf_Merged_Fasta2",
    "GeneBed_Maf_Fasta2",
    "maf_stats1",
    "Interval2Maf1",
    "Interval2Maf_pairwise1",
    "MAF_To_Interval1",
    "MAF_filter",
    "MAF_To_Fasta1",
    "MAF_Reverse_Complement_1",
    "MAF_split_blocks_by_species1",
    "MAF_Limit_To_Species1",
    "maf_by_block_number1",
    # Converters
    "CONVERTER_bed_to_fli_0",
    "CONVERTER_gff_to_fli_0",
    "CONVERTER_gff_to_interval_index_0",
    "CONVERTER_maf_to_fasta_0",
    "CONVERTER_maf_to_interval_0",
    # Tools improperly migrated to the tool shed (devteam)
    "qualityFilter",
    "pileup_interval",
    "count_gff_features",
    "lastz_paired_reads_wrapper",
    "subRate1",
    "find_diag_hits",
    # Tools improperly migrated using Galaxy (from shed other)
    "column_join",
    "gd_coverage_distributions",  # Genome Diversity tools from miller-lab
    "gd_dpmix",
    "gd_pca",
    "gd_phylogenetic_tree",
    "gd_population_structure",
    "gd_prepare_population_structure",
]
# Tools that needed galaxy on the PATH in the past but no longer do along
# with the version at which they were fixed.
GALAXY_LIB_TOOLS_VERSIONED = {
    "meme_fimo": packaging.version.parse("5.0.5"),
    "Extract genomic DNA 1": packaging.version.parse("3.0.0"),
    "fetchflank": packaging.version.parse("1.0.1"),
    "gops_intersect_1": packaging.version.parse("1.0.0"),
    "lastz_wrapper_2": packaging.version.parse("1.3"),
    "PEsortedSAM2readprofile": packaging.version.parse("1.1.1"),
    "sam_to_bam": packaging.version.parse("1.1.3"),
    "sam_pileup": packaging.version.parse("1.1.3"),
    "vcf_to_maf_customtrack1": packaging.version.parse("1.0.1"),
    "secure_hash_message_digest": packaging.version.parse("0.0.2"),
    "join1": packaging.version.parse("2.1.3"),
    "wiggle2simple1": packaging.version.parse("1.0.1"),
    "CONVERTER_wiggle_to_interval_0": packaging.version.parse("1.0.1"),
    "aggregate_scores_in_intervals2": packaging.version.parse("1.1.4"),
    "CONVERTER_fastq_to_fqtoc0": packaging.version.parse("1.0.1"),
    "CONVERTER_tar_to_directory": packaging.version.parse("1.0.1"),
    "tabular_to_dbnsfp": packaging.version.parse("1.0.1"),
    "cufflinks": packaging.version.parse("2.2.1.3"),
    "Convert characters1": packaging.version.parse("1.0.1"),
    "substitutions1": packaging.version.parse("1.0.1"),
    "winSplitter": packaging.version.parse("1.0.1"),
}

# Tab-separated mapping shipped with the package; presumably maps tool ids to
# bio.tools identifiers (comment lines starting with '#' are skipped).
BIOTOOLS_MAPPING_CONTENT = resource_string(__name__, 'biotools_mappings.tsv').decode("UTF-8")
BIOTOOLS_MAPPING: Dict[str, str] = dict([cast(Tuple[str, str], tuple(x.split("\t"))) for x in BIOTOOLS_MAPPING_CONTENT.splitlines() if not x.startswith("#")])

# RequiredFiles specification that matches every file under the tool directory.
REQUIRE_FULL_DIRECTORY = {
    "includes": [{"path": "**", "path_type": "glob"}],
}
IMPLICITLY_REQUIRED_TOOL_FILES: Dict[str, Dict] = {
    "deseq2": {"version": packaging.version.parse("2.11.40.6"), "required": {"includes": [{"path": "*.R", "path_type": "glob"}]}},
    # minimum example:
    # "foobar": {"required": REQUIRE_FULL_DIRECTORY}
    # if no version is specified, all versions without explicit RequiredFiles will be selected
}
class safe_update(NamedTuple):
    """Version pair (min_version, current_version) describing a tool update
    range that is known not to require rebuilding workflows."""
    min_version: Union[packaging.version.LegacyVersion, packaging.version.Version]
    current_version: Union[packaging.version.LegacyVersion, packaging.version.Version]


# Tool updates that did not change parameters in a way that requires rebuilding workflows
WORKFLOW_SAFE_TOOL_VERSION_UPDATES = {
    'Filter1': safe_update(packaging.version.parse("1.1.0"), packaging.version.parse("1.1.1")),
    '__BUILD_LIST__': safe_update(packaging.version.parse("1.0.0"), packaging.version.parse("1.0.1")),
    '__APPLY_RULES__': safe_update(packaging.version.parse("1.0.0"), packaging.version.parse("1.1.0")),
    '__EXTRACT_DATASET__': safe_update(packaging.version.parse("1.0.0"), packaging.version.parse("1.0.1")),
    'Grep1': safe_update(packaging.version.parse("1.0.1"), packaging.version.parse("1.0.3")),
    'Show beginning1': safe_update(packaging.version.parse("1.0.0"), packaging.version.parse("1.0.1")),
    'Show tail1': safe_update(packaging.version.parse("1.0.0"), packaging.version.parse("1.0.1")),
}
class ToolErrorLog:
    """Bounded, newest-first log of tool loading/parsing errors."""

    def __init__(self):
        self.error_stack = []
        self.max_errors = 100

    def add_error(self, file, phase, exception):
        """Record an error for ``file`` at ``phase``; oldest entry is dropped
        once ``max_errors`` is exceeded."""
        entry = {
            "file": file,
            "time": str(datetime.now()),
            "phase": phase,
            "error": unicodify(exception),
        }
        self.error_stack.insert(0, entry)
        if len(self.error_stack) > self.max_errors:
            del self.error_stack[-1]


global_tool_errors = ToolErrorLog()
class ToolNotFoundException(Exception):
    """Raised when a requested tool cannot be found."""
    pass
def create_tool_from_source(app, tool_source, config_file=None, **kwds):
    """
    Instantiate the appropriate Tool (sub)class for a parsed tool source.

    The class is resolved in order: an explicit ``tool_module`` declared by
    the source (dynamically imported), then a registered ``tool_type``, and
    finally the default ``Tool`` class.

    :param app: Galaxy application instance passed to the tool constructor.
    :param tool_source: parsed tool source object.
    :param config_file: path the tool source was loaded from, if any.
    :param kwds: extra keyword arguments forwarded to the tool constructor.
    """
    # Allow specifying a different tool subclass to instantiate
    tool_module = tool_source.parse_tool_module()
    if tool_module is not None:
        module, cls = tool_module
        mod = __import__(module, globals(), locals(), [cls])
        ToolClass = getattr(mod, cls)
    else:
        # Hoisted into a local so parse_tool_type() is only called once;
        # the unused `root` local from the original was removed.
        tool_type = tool_source.parse_tool_type()
        if tool_type:
            ToolClass = tool_types.get(tool_type)
        else:
            # Normal tool
            ToolClass = Tool
    return ToolClass(config_file, tool_source, app, **kwds)
class ToolBox(BaseGalaxyToolBox):
    """ A derivative of AbstractToolBox with knowledge about Tool internals -
    how to construct them, action types, dependency management, etc....
    """

    def __init__(self, config_filenames, tool_root_dir, app, save_integrated_tool_panel=True):
        # Incremented on reload; compared in has_reloaded() to detect stale
        # references to an older toolbox instance.
        self._reload_count = 0
        self.tool_location_fetcher = ToolLocationFetcher()
        # Maps tool cache directories to ToolDocumentCache instances,
        # populated lazily by get_cache_region().
        self.cache_regions = {}
        # This is here to deal with the old default value, which doesn't make
        # sense in an "installed Galaxy" world.
        # FIXME: ./
        if tool_root_dir == './tools':
            tool_root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'bundled'))
        super().__init__(
            config_filenames=config_filenames,
            tool_root_dir=tool_root_dir,
            app=app,
            save_integrated_tool_panel=save_integrated_tool_panel,
        )

    def persist_cache(self, register_postfork=False):
        """
        Persists any modified tool cache files to disk.

        Set ``register_postfork`` to stop database thread queue,
        close database connection and register re-open function
        that re-opens the database after forking.
        """
        for region in self.cache_regions.values():
            if not region.disabled:
                region.persist()
                if register_postfork:
                    region.close()
                    self.app.application_stack.register_postfork_function(region.reopen_ro)

    def can_load_config_file(self, config_filename):
        """Return False only to veto loading the default shed tool config when
        other shed-enabled configs make it redundant."""
        if config_filename == self.app.config.shed_tool_config_file and not self.app.config.is_set('shed_tool_config_file'):
            if self.dynamic_confs():
                # Do not load or create a default shed_tool_config_file if another shed_tool_config file has already been loaded
                return False
            elif self.app.config.is_set('tool_config_file'):
                log.warning(
                    "The default shed tool config file (%s) has been added to the tool_config_file option, if this is "
                    "not the desired behavior, please set shed_tool_config_file to your primary shed-enabled tool "
                    "config file", self.app.config.shed_tool_config_file
                )
        return True

    def has_reloaded(self, other_toolbox):
        # True when this toolbox's reload counter differs from other_toolbox's.
        return self._reload_count != other_toolbox._reload_count

    @property
    def all_requirements(self):
        # De-duplicated requirements across all loaded tools, serialized to dicts.
        reqs = {req for _, tool in self.tools() for req in tool.tool_requirements}
        return [r.to_dict() for r in reqs]

    @property
    def tools_by_id(self):
        # Deprecated method, TODO - eliminate calls to this in test/.
        return self._tools_by_id

    def get_cache_region(self, tool_cache_data_dir):
        """Lazily create and memoize the ToolDocumentCache for a directory.

        Returns None (implicitly) when the tool document cache is disabled
        in the app config."""
        if self.app.config.enable_tool_document_cache:
            if tool_cache_data_dir not in self.cache_regions:
                self.cache_regions[tool_cache_data_dir] = ToolDocumentCache(cache_dir=tool_cache_data_dir)
            return self.cache_regions[tool_cache_data_dir]

    def create_tool(self, config_file, tool_cache_data_dir=None, **kwds):
        """Parse ``config_file`` (via the document cache when possible) and
        construct the corresponding tool instance."""
        cache = self.get_cache_region(tool_cache_data_dir or self.app.config.tool_cache_data_dir)
        if config_file.endswith('.xml') and cache and not cache.disabled:
            tool_document = cache.get(config_file)
            if tool_document:
                # Cached, macro-expanded XML: rebuild the tree without re-reading disk.
                tool_source = self.get_expanded_tool_source(
                    config_file=config_file,
                    xml_tree=etree.ElementTree(etree.fromstring(tool_document['document'].encode('utf-8'))),
                    macro_paths=tool_document['macro_paths']
                )
            else:
                tool_source = self.get_expanded_tool_source(config_file)
                cache.set(config_file, tool_source)
        else:
            tool_source = self.get_expanded_tool_source(config_file)
        tool = self._create_tool_from_source(tool_source, config_file=config_file, **kwds)
        if not self.app.config.delay_tool_initialization:
            # Force full input/output parsing now instead of lazily on first use.
            tool.assert_finalized(raise_if_invalid=True)
        return tool

    def get_expanded_tool_source(self, config_file, **kwargs):
        """Parse a tool source with macros expanded; parse failures are
        recorded in the global tool error log and re-raised."""
        try:
            return get_tool_source(
                config_file,
                enable_beta_formats=getattr(self.app.config, "enable_beta_tool_formats", False),
                tool_location_fetcher=self.tool_location_fetcher,
                **kwargs,
            )
        except Exception as e:
            # capture and log parsing errors
            global_tool_errors.add_error(config_file, "Tool XML parsing", e)
            raise e

    def _create_tool_from_source(self, tool_source, **kwds):
        # Delegate to the module-level factory with this toolbox's app.
        return create_tool_from_source(self.app, tool_source, **kwds)

    def create_dynamic_tool(self, dynamic_tool, **kwds):
        """Build a tool instance from a database-backed DynamicTool record,
        copying over its uuid and (if unset) its id/name."""
        tool_format = dynamic_tool.tool_format
        tool_representation = dynamic_tool.value
        tool_source = get_tool_source_from_representation(
            tool_format=tool_format,
            tool_representation=tool_representation,
        )
        kwds["dynamic"] = True
        tool = self._create_tool_from_source(tool_source, **kwds)
        tool.dynamic_tool = dynamic_tool
        tool.uuid = dynamic_tool.uuid
        if not tool.id:
            tool.id = dynamic_tool.tool_id
        if not tool.name:
            tool.name = tool.id
        return tool

    def get_tool_components(self, tool_id, tool_version=None, get_loaded_tools_by_lineage=False, set_selected=False):
        """
        Retrieve all loaded versions of a tool from the toolbox and return a select list enabling
        selection of a different version, the list of the tool's loaded versions, and the specified tool.
        """
        toolbox = self
        tool_version_select_field = None
        tools = []
        tool = None
        # Backwards compatibility for datasource tools that have default tool_id configured, but which
        # are now using only GALAXY_URL.
        tool_ids = listify(tool_id)
        for tool_id in tool_ids:
            if tool_id.endswith('/'):
                # Some data sources send back redirects ending with `/`, this takes care of that case
                tool_id = tool_id[:-1]
            if get_loaded_tools_by_lineage:
                tools = toolbox.get_loaded_tools_by_lineage(tool_id)
            else:
                tools = toolbox.get_tool(tool_id, tool_version=tool_version, get_all_versions=True)
            if tools:
                tool = toolbox.get_tool(tool_id, tool_version=tool_version, get_all_versions=False)
                if len(tools) > 1:
                    # Only offer a version selector when multiple versions exist.
                    tool_version_select_field = self.__build_tool_version_select_field(tools, tool.id, set_selected)
                break
        return tool_version_select_field, tools, tool

    def _path_template_kwds(self):
        # Extra substitution variables available in tool config path templates.
        return {
            "model_tools_path": MODEL_TOOLS_PATH,
        }

    def _get_tool_shed_repository(self, tool_shed, name, owner, installed_changeset_revision):
        # Abstract toolbox doesn't have a dependency on the database, so
        # override _get_tool_shed_repository here to provide this information.
        return get_installed_repository(
            self.app,
            tool_shed=tool_shed,
            name=name,
            owner=owner,
            installed_changeset_revision=installed_changeset_revision,
            from_cache=True,
        )

    def __build_tool_version_select_field(self, tools, tool_id, set_selected):
        """Build a SelectField whose options are the ids for the received list of tools."""
        options = []
        for tool in tools:
            # insert(0, ...) reverses the incoming order so newest appears first.
            options.insert(0, (tool.version, tool.id))
        select_field = SelectField(name='tool_id')
        for option_tup in options:
            selected = set_selected and option_tup[1] == tool_id
            if selected:
                select_field.add_option(f'version {option_tup[0]}', option_tup[1], selected=True)
            else:
                select_field.add_option(f'version {option_tup[0]}', option_tup[1])
        return select_field
class DefaultToolState:
    """
    Keeps track of the state of a users interaction with a tool between
    requests.
    """

    def __init__(self):
        # Wizard page index, rerun remap job id, and the tool input values.
        self.page = 0
        self.rerun_remap_job_id = None
        self.inputs = {}

    def initialize(self, trans, tool):
        """
        Create a new `DefaultToolState` for this tool. It will be initialized
        with default values for inputs. Grouping elements are filled in recursively.
        """
        fresh_inputs = {}
        self.inputs = fresh_inputs
        context = ExpressionContext(fresh_inputs)
        for tool_input in tool.inputs.values():
            fresh_inputs[tool_input.name] = tool_input.get_initial_value(trans, context)

    def encode(self, tool, app, nested=False):
        """
        Convert the data to a string
        """
        encoded = params_to_strings(tool.inputs, self.inputs, app, nested=nested)
        encoded["__page__"] = self.page
        encoded["__rerun_remap_job_id__"] = self.rerun_remap_job_id
        return encoded

    def decode(self, values, tool, app):
        """
        Restore the state from a string
        """
        raw = safe_loads(values) or {}
        # pop with a default mirrors the original conditional-pop behavior
        self.page = raw.pop("__page__", None)
        self.rerun_remap_job_id = raw.pop("__rerun_remap_job_id__", None)
        self.inputs = params_from_strings(tool.inputs, raw, app, ignore_errors=True)

    def copy(self):
        """
        Shallow copy of the state
        """
        duplicate = DefaultToolState()
        duplicate.page = self.page
        duplicate.rerun_remap_job_id = self.rerun_remap_job_id
        duplicate.inputs = self.inputs
        return duplicate
class Tool(Dictifiable):
"""
Represents a computational tool that can be executed through Galaxy.
"""
tool_type = 'default'
requires_setting_metadata = True
produces_entry_points = False
default_tool_action = DefaultToolAction
tool_type_local = False
dict_collection_visible_keys = ['id', 'name', 'version', 'description', 'labels']
def __init__(self, config_file, tool_source, app, guid=None, repository_id=None, tool_shed_repository=None, allow_code_files=True, dynamic=False):
"""Load a tool from the config named by `config_file`"""
# Determine the full path of the directory where the tool config is
if config_file is not None:
self.config_file = config_file
self.tool_dir = os.path.dirname(config_file)
else:
self.config_file = None
self.tool_dir = None
self.app = app
self.repository_id = repository_id
self._allow_code_files = allow_code_files
# setup initial attribute values
self.stdio_exit_codes = list()
self.stdio_regexes = list()
self.inputs_by_page = list()
self.display_by_page = list()
self.action = '/tool_runner/index'
self.target = 'galaxy_main'
self.method = 'post'
self.labels = []
self.check_values = True
self.nginx_upload = False
self.input_required = False
self.display_interface = True
self.require_login = False
self.rerun = False
# This will be non-None for tools loaded from the database (DynamicTool objects).
self.dynamic_tool = None
# Define a place to keep track of all input These
# differ from the inputs dictionary in that inputs can be page
# elements like conditionals, but input_params are basic form
# parameters like SelectField objects. This enables us to more
# easily ensure that parameter dependencies like index files or
# tool_data_table_conf.xml entries exist.
self.input_params = []
# Attributes of tools installed from Galaxy tool sheds.
self.tool_shed = None
self.repository_name = None
self.repository_owner = None
self.changeset_revision = None
self.installed_changeset_revision = None
self.sharable_url = None
# The tool.id value will be the value of guid, but we'll keep the
# guid attribute since it is useful to have.
self.guid = guid
self.old_id = None
self.version = None
self.python_template_version = None
self._lineage = None
self.dependencies = []
# populate toolshed repository info, if available
self.populate_tool_shed_info(tool_shed_repository)
# add tool resource parameters
self.populate_resource_parameters(tool_source)
self.tool_errors = None
# Parse XML element containing configuration
self.tool_source = tool_source
self._is_workflow_compatible = None
self.finalized = False
try:
self.parse(tool_source, guid=guid, dynamic=dynamic)
except Exception as e:
global_tool_errors.add_error(config_file, "Tool Loading", e)
raise e
# The job search is only relevant in a galaxy context, and breaks
# loading tools into the toolshed for validation.
if self.app.name == 'galaxy':
self.job_search = self.app.job_search
def __getattr__(self, name):
lazy_attributes = {
'action',
'check_values',
'display_by_page',
'enctype',
'has_multiple_pages',
'inputs',
'inputs_by_page',
'last_page',
'method',
'npages',
'nginx_upload',
'target',
'template_macro_params',
'outputs',
'output_collections'
}
if name in lazy_attributes:
self.assert_finalized()
return getattr(self, name)
raise AttributeError(name)
def assert_finalized(self, raise_if_invalid=False):
if self.finalized is False:
try:
self.parse_inputs(self.tool_source)
self.parse_outputs(self.tool_source)
self.finalized = True
except Exception:
toolbox = getattr(self.app, 'toolbox', None)
if toolbox:
toolbox.remove_tool_by_id(self.id)
if raise_if_invalid:
raise
else:
log.warning("An error occured while parsing the tool wrapper xml, the tool is not functional", exc_info=True)
def remove_from_cache(self):
source_path = self.tool_source._source_path
if source_path:
for region in self.app.toolbox.cache_regions.values():
region.delete(source_path)
    @property
    def history_manager(self):
        """The application's shared history manager."""
        return self.app.history_manager
    @property
    def _view(self):
        """The application's dependency resolvers view."""
        return self.app.dependency_resolvers_view
    @property
    def version_object(self):
        """The tool version parsed into a comparable ``packaging`` version object."""
        return packaging.version.parse(self.version)
    @property
    def sa_session(self):
        """Return the application's SQLAlchemy session (model context)."""
        return self.app.model.context
    @property
    def lineage(self):
        """Return ToolLineage for this tool (None until one is assigned)."""
        return self._lineage
@property
def tool_versions(self):
# If we have versions, return them.
if self.lineage:
return list(self.lineage.tool_versions)
else:
return []
@property
def is_latest_version(self):
tool_versions = self.tool_versions
return not tool_versions or self.version == self.tool_versions[-1]
@property
def latest_version(self):
if self.is_latest_version:
return self
else:
return self.app.tool_cache.get_tool_by_id(self.lineage.get_versions()[-1].id)
    @property
    def is_datatype_converter(self):
        """True when this tool is registered as a datatype converter."""
        return self in self.app.datatypes_registry.converter_tools
@property
def tool_shed_repository(self):
# If this tool is included in an installed tool shed repository, return it.
if self.tool_shed:
return get_installed_repository(self.app,
tool_shed=self.tool_shed,
name=self.repository_name,
owner=self.repository_owner,
installed_changeset_revision=self.installed_changeset_revision,
from_cache=True)
@property
def produces_collections_with_unknown_structure(self):
def output_is_dynamic(output):
if not output.collection:
return False
return output.dynamic_structure
return any(map(output_is_dynamic, self.outputs.values()))
    @property
    def valid_input_states(self):
        """Dataset states acceptable for tool inputs (delegates to the model)."""
        return model.Dataset.valid_input_states
@property
def requires_galaxy_python_environment(self):
"""Indicates this tool's runtime requires Galaxy's Python environment."""
# All special tool types (data source, history import/export, etc...)
# seem to require Galaxy's Python.
# FIXME: the (instantiated) tool class should emit this behavior, and not
# use inspection by string check
if self.tool_type not in ["default", "manage_data", "interactive"]:
return True
if self.tool_type == "manage_data" and self.profile < 18.09:
return True
config = self.app.config
preserve_python_environment = config.preserve_python_environment
if preserve_python_environment == "always":
return True
elif preserve_python_environment == "legacy_and_local" and self.tool_shed is None:
return True
else:
unversioned_legacy_tool = self.old_id in GALAXY_LIB_TOOLS_UNVERSIONED
versioned_legacy_tool = self.old_id in GALAXY_LIB_TOOLS_VERSIONED
legacy_tool = unversioned_legacy_tool or \
(versioned_legacy_tool and self.version_object < GALAXY_LIB_TOOLS_VERSIONED[self.old_id])
return legacy_tool
    def __get_job_tool_configuration(self, job_params=None):
        """Generalized method for getting this tool's job configuration.
        :type job_params: dict or None
        :returns: `galaxy.jobs.JobToolConfiguration` -- JobToolConfiguration that matches this `Tool` and the given `job_params`
        """
        rval = None
        if len(self.job_tool_configurations) == 1:
            # If there's only one config, use it rather than wasting time on comparisons
            rval = self.job_tool_configurations[0]
        elif job_params is None:
            # No params supplied: take the first config that declares none.
            for job_tool_config in self.job_tool_configurations:
                if not job_tool_config.params:
                    rval = job_tool_config
                    break
        else:
            for job_tool_config in self.job_tool_configurations:
                if job_tool_config.params:
                    # There are job params and this config has params defined
                    for param, value in job_params.items():
                        if param not in job_tool_config.params or job_tool_config.params[param] != value:
                            break
                    else:
                        # All params match, use this config
                        rval = job_tool_config
                        break
                else:
                    # Param-less config: remember it as a fallback but keep
                    # scanning for an exact params match (note: no break).
                    rval = job_tool_config
        assert rval is not None, f'Could not get a job tool configuration for Tool {self.id} with job_params {job_params}, this is a bug'
        return rval
    def get_configured_job_handler(self, job_params=None):
        """Get the configured job handler for this `Tool` given the provided `job_params`.
        Unlike the former ``get_job_handler()`` method, this does not perform "preassignment" (random selection of
        a configured handler ID from a tag).
        :param job_params: Any params specific to this job (e.g. the job source)
        :type job_params: dict or None
        :returns: str or None -- The configured handler for a job run of this `Tool`
        """
        # Matching of job_params is delegated to __get_job_tool_configuration.
        return self.__get_job_tool_configuration(job_params=job_params).handler
    def get_job_destination(self, job_params=None):
        """
        :param job_params: Any params specific to this job (e.g. the job source)
        :type job_params: dict or None
        :returns: galaxy.jobs.JobDestination -- The destination definition and runner parameters.
        """
        return self.app.job_config.get_destination(self.__get_job_tool_configuration(job_params=job_params).destination)
    def get_panel_section(self):
        """Return the toolbox panel section this tool is integrated into."""
        return self.app.toolbox.get_integrated_section_for_tool(self)
def allow_user_access(self, user, attempting_access=True):
"""
:returns: bool -- Whether the user is allowed to access the tool.
"""
if self.require_login and user is None:
return False
return True
def parse(self, tool_source, guid=None, dynamic=False):
"""
Read tool configuration from the element `root` and fill in `self`.
"""
self.profile = float(tool_source.parse_profile())
# Get the UNIQUE id for the tool
self.old_id = tool_source.parse_id()
if guid is None:
self.id = self.old_id
else:
self.id = guid
if not dynamic and not self.id:
raise Exception(f"Missing tool 'id' for tool at '{tool_source}'")
profile = packaging.version.parse(str(self.profile))
if profile >= packaging.version.parse("16.04") and packaging.version.parse(VERSION_MAJOR) < profile:
template = "The tool %s targets version %s of Galaxy, you should upgrade Galaxy to ensure proper functioning of this tool."
message = template % (self.id, self.profile)
raise Exception(message)
self.python_template_version = tool_source.parse_python_template_version()
if self.python_template_version is None:
# If python_template_version not specified we assume tools with profile versions >= 19.05 are python 3 ready
if self.profile >= 19.05:
self.python_template_version = packaging.version.parse('3.5')
else:
self.python_template_version = packaging.version.parse('2.7')
# Get the (user visible) name of the tool
self.name = tool_source.parse_name()
if not self.name and dynamic:
self.name = self.id
if not dynamic and not self.name:
raise Exception(f"Missing tool 'name' for tool with id '{self.id}' at '{tool_source}'")
self.version = tool_source.parse_version()
if not self.version:
if self.profile < 16.04:
# For backward compatibility, some tools may not have versions yet.
self.version = "1.0.0"
else:
raise Exception(f"Missing tool 'version' for tool with id '{self.id}' at '{tool_source}'")
self.edam_operations = tool_source.parse_edam_operations()
self.edam_topics = tool_source.parse_edam_topics()
# Support multi-byte tools
self.is_multi_byte = tool_source.parse_is_multi_byte()
# Legacy feature, ignored by UI.
self.force_history_refresh = False
self.display_interface = tool_source.parse_display_interface(default=self.display_interface)
self.require_login = tool_source.parse_require_login(self.require_login)
request_param_translation_elem = tool_source.parse_request_param_translation_elem()
if request_param_translation_elem is not None:
# Load input translator, used by datasource tools to change names/values of incoming parameters
self.input_translator = ToolInputTranslator.from_element(request_param_translation_elem)
else:
self.input_translator = None
self.parse_command(tool_source)
self.environment_variables = self.parse_environment_variables(tool_source)
self.tmp_directory_vars = tool_source.parse_tmp_directory_vars()
home_target = tool_source.parse_home_target()
tmp_target = tool_source.parse_tmp_target()
# If a tool explicitly sets one of these variables just respect that and turn off
# explicit processing by Galaxy.
for environment_variable in self.environment_variables:
if environment_variable.get("name") == "HOME":
home_target = None
continue
for tmp_directory_var in self.tmp_directory_vars:
if environment_variable.get("name") == tmp_directory_var:
tmp_target = None
break
self.home_target = home_target
self.tmp_target = tmp_target
self.docker_env_pass_through = tool_source.parse_docker_env_pass_through()
if self.environment_variables:
if not self.docker_env_pass_through:
self.docker_env_pass_through = []
self.docker_env_pass_through.extend(map(lambda x: x['name'], self.environment_variables))
# Parameters used to build URL for redirection to external app
redirect_url_params = tool_source.parse_redirect_url_params_elem()
if redirect_url_params is not None and redirect_url_params.text is not None:
# get rid of leading / trailing white space
redirect_url_params = redirect_url_params.text.strip()
# Replace remaining white space with something we can safely split on later
# when we are building the params
self.redirect_url_params = redirect_url_params.replace(' ', '**^**')
else:
self.redirect_url_params = ''
# Short description of the tool
self.description = tool_source.parse_description()
# Versioning for tools
self.version_string_cmd = None
version_command = tool_source.parse_version_command()
if version_command is not None:
self.version_string_cmd = version_command.strip()
version_cmd_interpreter = tool_source.parse_version_command_interpreter()
if version_cmd_interpreter:
executable = self.version_string_cmd.split()[0]
abs_executable = os.path.abspath(os.path.join(self.tool_dir, executable))
command_line = self.version_string_cmd.replace(executable, abs_executable, 1)
self.version_string_cmd = f"{version_cmd_interpreter} {command_line}"
# Parallelism for tasks, read from tool config.
self.parallelism = tool_source.parse_parallelism()
# Get JobToolConfiguration(s) valid for this particular Tool. At least
# a 'default' will be provided that uses the 'default' handler and
# 'default' destination. I thought about moving this to the
# job_config, but it makes more sense to store here. -nate
if self.id:
self_ids = [self.id.lower()]
if self.old_id != self.id:
# Handle toolshed guids
self_ids = [self.id.lower(), self.id.lower().rsplit('/', 1)[0], self.old_id.lower()]
else:
self_ids = []
self.all_ids = self_ids
# In the toolshed context, there is no job config.
if hasattr(self.app, 'job_config'):
# Order of this list must match documentation in job_conf.sample_advanced.yml
tool_classes = []
if self.tool_type_local:
tool_classes.append("local")
elif self.old_id in ['upload1', '__DATA_FETCH__']:
tool_classes.append("local")
if self.requires_galaxy_python_environment:
tool_classes.append("requires_galaxy")
self.job_tool_configurations = self.app.job_config.get_job_tool_configurations(self_ids, tool_classes)
# Is this a 'hidden' tool (hidden in tool menu)
self.hidden = tool_source.parse_hidden()
self.license = tool_source.parse_license()
self.creator = tool_source.parse_creator()
self.__parse_legacy_features(tool_source)
# Load any tool specific options (optional)
self.options = dict(
sanitize=tool_source.parse_sanitize(),
refresh=tool_source.parse_refresh(),
)
self.options = Bunch(** self.options)
# Read in name of galaxy.json metadata file and how to parse it.
self.provided_metadata_file = tool_source.parse_provided_metadata_file()
self.provided_metadata_style = tool_source.parse_provided_metadata_style()
# Parse tool help
self.parse_help(tool_source)
# Parse result handling for tool exit codes and stdout/stderr messages:
self.parse_stdio(tool_source)
self.strict_shell = tool_source.parse_strict_shell()
# Any extra generated config files for the tool
self.__parse_config_files(tool_source)
# Action
action = tool_source.parse_action_module()
if action is None:
self.tool_action = self.default_tool_action()
else:
module, cls = action
mod = __import__(module, globals(), locals(), [cls])
self.tool_action = getattr(mod, cls)()
if getattr(self.tool_action, "requires_js_runtime", False):
try:
expressions.find_engine(self.app.config)
except Exception:
message = REQUIRES_JS_RUNTIME_MESSAGE % self.tool_id or self.tool_uuid
raise Exception(message)
# Tests
self.__parse_tests(tool_source)
# Requirements (dependencies)
requirements, containers = tool_source.parse_requirements_and_containers()
self.requirements = requirements
self.containers = containers
required_files = tool_source.parse_required_files()
if required_files is None:
old_id = self.old_id
if old_id in IMPLICITLY_REQUIRED_TOOL_FILES:
lineage_requirement = IMPLICITLY_REQUIRED_TOOL_FILES[old_id]
lineage_requirement_until = lineage_requirement.get("version")
if lineage_requirement_until is None or self.version_object < lineage_requirement_until:
required_files = RequiredFiles.from_dict(lineage_requirement["required"])
self.required_files = required_files
self.citations = self._parse_citations(tool_source)
xrefs = tool_source.parse_xrefs()
has_biotools_reference = any(x["reftype"] == "bio.tools" for x in xrefs)
if not has_biotools_reference:
legacy_biotools_ref = self.legacy_biotools_external_reference
if legacy_biotools_ref is not None:
xrefs.append({"value": legacy_biotools_ref, "reftype": "bio.tools"})
self.xrefs = xrefs
self.__parse_trackster_conf(tool_source)
# Record macro paths so we can reload a tool if any of its macro has changes
self._macro_paths = tool_source.macro_paths
self.ports = tool_source.parse_interactivetool()
    def __parse_legacy_features(self, tool_source):
        """Parse deprecated XML-only features: <code> hook files and <uihints>.

        Populates self.code_namespace (exec'd code file globals), self.hook_map
        (hook name -> function name) and self.uihints. No-op for non-XML tool
        sources.
        """
        self.code_namespace = dict()
        self.hook_map = {}
        self.uihints = {}
        if not hasattr(tool_source, 'root'):
            return
        # TODO: Move following logic into XmlToolSource.
        root = tool_source.root
        # Load any tool specific code (optional) Edit: INS 5/29/2007,
        # allow code files to have access to the individual tool's
        # "module" if it has one. Allows us to reuse code files, etc.
        for code_elem in root.findall("code"):
            for hook_elem in code_elem.findall("hook"):
                for key, value in hook_elem.items():
                    # map hook to function
                    self.hook_map[key] = value
            file_name = code_elem.get("file")
            code_path = os.path.join(self.tool_dir, file_name)
            # NOTE: executes arbitrary tool-provided code; gated by the
            # _allow_code_files flag.
            if self._allow_code_files:
                with open(code_path) as f:
                    code_string = f.read()
                try:
                    compiled_code = compile(code_string, code_path, 'exec')
                    exec(compiled_code, self.code_namespace)
                except Exception:
                    if refactoring_tool and self.python_template_version.release[0] < 3:
                        # Could be a code file that uses python 2 syntax
                        translated_code = str(refactoring_tool.refactor_string(code_string, name='auto_translated_code_file'))
                        compiled_code = compile(translated_code, f"futurized_{code_path}", 'exec')
                        exec(compiled_code, self.code_namespace)
                    else:
                        raise
        # User interface hints
        uihints_elem = root.find("uihints")
        if uihints_elem is not None:
            for key, value in uihints_elem.attrib.items():
                self.uihints[key] = value
    def __parse_tests(self, tool_source):
        # Defer test parsing: the `tests` property parses lazily on first
        # access, since tests reference finalized inputs/outputs.
        self.__tests_source = tool_source
        self.__tests_populated = False
def __parse_config_files(self, tool_source):
self.config_files = []
if not hasattr(tool_source, 'root'):
return
root = tool_source.root
conf_parent_elem = root.find("configfiles")
if conf_parent_elem is not None:
inputs_elem = conf_parent_elem.find("inputs")
if inputs_elem is not None:
name = inputs_elem.get("name")
filename = inputs_elem.get("filename", None)
format = inputs_elem.get("format", "json")
data_style = inputs_elem.get("data_style", "skip")
content = dict(format=format, handle_files=data_style, type="inputs")
self.config_files.append((name, filename, content))
file_sources_elem = conf_parent_elem.find("file_sources")
if file_sources_elem is not None:
name = file_sources_elem.get("name")
filename = file_sources_elem.get("filename", None)
content = dict(type="files")
self.config_files.append((name, filename, content))
for conf_elem in conf_parent_elem.findall("configfile"):
name = conf_elem.get("name")
filename = conf_elem.get("filename", None)
content = conf_elem.text
self.config_files.append((name, filename, content))
def __parse_trackster_conf(self, tool_source):
self.trackster_conf = None
if not hasattr(tool_source, 'root'):
return
# Trackster configuration.
trackster_conf = tool_source.root.find("trackster_conf")
if trackster_conf is not None:
self.trackster_conf = TracksterConfig.parse(trackster_conf)
    @property
    def tests(self):
        """Parsed tool tests, computed lazily and cached after the first access."""
        # Tests reference parsed inputs/outputs, so finalize first.
        self.assert_finalized()
        if not self.__tests_populated:
            tests_source = self.__tests_source
            if tests_source:
                try:
                    self.__tests = parse_tests(self, tests_source)
                except Exception:
                    # A broken <tests> section must not break the tool itself.
                    self.__tests = None
                    log.exception("Failed to parse tool tests for tool '%s'", self.id)
            else:
                self.__tests = None
            self.__tests_populated = True
        return self.__tests
    @property
    def _repository_dir(self):
        """If tool shed installed tool, the base directory of the repository installed."""
        repository_base_dir = None
        if getattr(self, 'tool_shed', None):
            tool_dir = Path(self.tool_dir)
            # Walk from the tool's own directory up through its ancestors
            # looking for the directory named after the repository.
            for repo_dir in itertools.chain([tool_dir], tool_dir.parents):
                if repo_dir.name == self.repository_name:
                    return str(repo_dir)
            else:
                # for/else: the loop has no break, so this runs whenever no
                # ancestor matched (i.e. we did not return above).
                log.error(f"Problem finding repository dir for tool '{self.id}'")
        return repository_base_dir
    def test_data_path(self, filename):
        """Resolve ``filename`` to an on-disk test-data file for this tool.

        Looks in the installed repository (for tool shed tools), then near the
        tool itself, then falls back to Galaxy's shared test-data resolver.
        """
        repository_dir = self._repository_dir
        test_data = None
        if repository_dir:
            test_data = self.__walk_test_data(dir=repository_dir, filename=filename)
        else:
            if self.tool_dir:
                tool_dir = self.tool_dir
                if isinstance(self, DataManagerTool):
                    # Data manager tools keep test data one directory up.
                    tool_dir = os.path.dirname(self.tool_dir)
                test_data = self.__walk_test_data(tool_dir, filename=filename)
        if not test_data:
            # Fallback to Galaxy test data directory for builtin tools, tools
            # under development, and some older ToolShed published tools that
            # used stock test data.
            test_data = self.app.test_data_resolver.get_filename(filename)
        return test_data
def __walk_test_data(self, dir, filename):
for root, dirs, _ in os.walk(dir):
if '.hg' in dirs:
dirs.remove('.hg')
if 'test-data' in dirs:
test_data_dir = os.path.join(root, 'test-data')
result = os.path.abspath(os.path.join(test_data_dir, filename))
if not in_directory(result, test_data_dir):
# Don't raise an explicit exception and reveal details about what
# files are or are not on the path, simply return None and let the
# API raise a 404.
return None
else:
if os.path.exists(result):
return result
    def tool_provided_metadata(self, job_wrapper):
        """Parse the tool-emitted metadata file from the job's working directory."""
        meta_file = os.path.join(job_wrapper.tool_working_directory, self.provided_metadata_file)
        return parse_tool_provided_metadata(meta_file, provided_metadata_style=self.provided_metadata_style, job_wrapper=job_wrapper)
def parse_command(self, tool_source):
"""
"""
# Command line (template). Optional for tools that do not invoke a local program
command = tool_source.parse_command()
if command is not None:
self.command = command.lstrip() # get rid of leading whitespace
# Must pre-pend this AFTER processing the cheetah command template
self.interpreter = tool_source.parse_interpreter()
else:
self.command = ''
self.interpreter = None
def parse_environment_variables(self, tool_source):
return tool_source.parse_environment_variables()
    def parse_inputs(self, tool_source):
        """
        Parse the "<inputs>" element and create appropriate `ToolParameter` s.
        This implementation supports multiple pages and grouping constructs.

        Fills in self.inputs, self.inputs_by_page / self.display_by_page,
        page bookkeeping (npages/last_page/has_multiple_pages), the form
        enctype, template macro params, and self.input_required.
        """
        # Load parameters (optional)
        self.inputs = {}
        pages = tool_source.parse_input_pages()
        enctypes = set()
        if pages.inputs_defined:
            if hasattr(pages, "input_elem"):
                input_elem = pages.input_elem
                # Handle properties of the input form
                self.check_values = string_as_bool(input_elem.get("check_values", self.check_values))
                self.nginx_upload = string_as_bool(input_elem.get("nginx_upload", self.nginx_upload))
                self.action = input_elem.get('action', self.action)
                # If we have an nginx upload, save the action as a tuple instead of
                # a string. The actual action needs to get url_for run to add any
                # prefixes, and we want to avoid adding the prefix to the
                # nginx_upload_path.
                if self.nginx_upload and self.app.config.nginx_upload_path:
                    if '?' in unquote_plus(self.action):
                        raise Exception('URL parameters in a non-default tool action can not be used '
                                        'in conjunction with nginx upload. Please convert them to '
                                        'hidden POST parameters')
                    self.action = (f"{self.app.config.nginx_upload_path}?nginx_redir=",
                                   unquote_plus(self.action))
                self.target = input_elem.get("target", self.target)
                self.method = input_elem.get("method", self.method)
            # Parse the actual parameters
            # Handle multiple page case
            for page_source in pages.page_sources:
                inputs = self.parse_input_elem(page_source, enctypes)
                display = page_source.parse_display()
                self.inputs_by_page.append(inputs)
                self.inputs.update(inputs)
                self.display_by_page.append(display)
        else:
            # No inputs at all: a single empty page.
            self.inputs_by_page.append(self.inputs)
            self.display_by_page.append(None)
        self.display = self.display_by_page[0]
        self.npages = len(self.inputs_by_page)
        self.last_page = len(self.inputs_by_page) - 1
        self.has_multiple_pages = bool(self.last_page)
        # Determine the needed enctype for the form
        if len(enctypes) == 0:
            self.enctype = "application/x-www-form-urlencoded"
        elif len(enctypes) == 1:
            self.enctype = enctypes.pop()
        else:
            raise Exception(f"Conflicting required enctypes: {str(enctypes)}")
        # Check if the tool either has no parameters or only hidden (and
        # thus hardcoded) FIXME: hidden parameters aren't
        # parameters at all really, and should be passed in a different
        # way, making this check easier.
        template_macros = {}
        if hasattr(tool_source, 'root'):
            template_macros = template_macro_params(tool_source.root)
        self.template_macro_params = template_macros
        for param in self.inputs.values():
            if not isinstance(param, (HiddenToolParameter, BaseURLToolParameter)):
                self.input_required = True
                break
    def parse_help(self, tool_source):
        """
        Parse the help text for the tool. Formatted in reStructuredText, but
        stored as Mako to allow for dynamic image paths.
        This implementation supports multiple pages.
        """
        # TODO: Allow raw HTML or an external link.
        # Help is rendered lazily: stash the source and mark both caches with
        # the HELP_UNINITIALIZED sentinel until the `help` property is read.
        self.__help = HELP_UNINITIALIZED
        self.__help_by_page = HELP_UNINITIALIZED
        self.__help_source = tool_source
    def parse_outputs(self, tool_source):
        """
        Parse <outputs> elements and fill in self.outputs and
        self.output_collections (both keyed by name).
        """
        self.outputs, self.output_collections = tool_source.parse_outputs(self)
        # TODO: Include the tool's name in any parsing warnings.
def parse_stdio(self, tool_source):
"""
Parse <stdio> element(s) and fill in self.return_codes,
self.stderr_rules, and self.stdout_rules. Return codes have a range
and an error type (fault or warning). Stderr and stdout rules have
a regular expression and an error level (fault or warning).
"""
exit_codes, regexes = tool_source.parse_stdio()
self.stdio_exit_codes = exit_codes
self.stdio_regexes = regexes
def _parse_citations(self, tool_source):
# TODO: Move following logic into ToolSource abstraction.
if not hasattr(tool_source, 'root'):
return []
root = tool_source.root
citations = []
citations_elem = root.find("citations")
if citations_elem is None:
return citations
for citation_elem in citations_elem:
if citation_elem.tag != "citation":
pass
if hasattr(self.app, 'citations_manager'):
citation = self.app.citations_manager.parse_citation(citation_elem)
if citation:
citations.append(citation)
return citations
def parse_input_elem(self, page_source, enctypes, context=None):
"""
Parse a parent element whose children are inputs -- these could be
groups (repeat, conditional) or param elements. Groups will be parsed
recursively.
"""
rval = {}
context = ExpressionContext(rval, context)
for input_source in page_source.parse_input_sources():
# Repeat group
input_type = input_source.parse_input_type()
if input_type == "repeat":
group = Repeat()
group.name = input_source.get("name")
group.title = input_source.get("title")
group.help = input_source.get("help", None)
page_source = input_source.parse_nested_inputs_source()
group.inputs = self.parse_input_elem(page_source, enctypes, context)
group.default = int(input_source.get("default", 0))
group.min = int(input_source.get("min", 0))
# Use float instead of int so that 'inf' can be used for no max
group.max = float(input_source.get("max", "inf"))
assert group.min <= group.max, ValueError(f"Tool with id '{self.id}': min repeat count must be less-than-or-equal to the max.")
# Force default to be within min-max range
group.default = min(max(group.default, group.min), group.max)
rval[group.name] = group
elif input_type == "conditional":
group = Conditional()
group.name = input_source.get("name")
group.value_ref = input_source.get('value_ref', None)
group.value_ref_in_group = input_source.get_bool('value_ref_in_group', True)
value_from = input_source.get("value_from", None)
if value_from:
value_from = value_from.split(':')
group.value_from = locals().get(value_from[0])
group.test_param = rval[group.value_ref]
group.test_param.refresh_on_change = True
for attr in value_from[1].split('.'):
group.value_from = getattr(group.value_from, attr)
for case_value, case_inputs in group.value_from(context, group, self).items():
case = ConditionalWhen()
case.value = case_value
if case_inputs:
page_source = XmlPageSource(XML(f"<when>{case_inputs}</when>"))
case.inputs = self.parse_input_elem(page_source, enctypes, context)
else:
case.inputs = {}
group.cases.append(case)
else:
# Should have one child "input" which determines the case
test_param_input_source = input_source.parse_test_input_source()
group.test_param = self.parse_param_elem(test_param_input_source, enctypes, context)
if group.test_param.optional:
log.debug(f"Tool with id '{self.id}': declares a conditional test parameter as optional, this is invalid and will be ignored.")
group.test_param.optional = False
possible_cases = list(group.test_param.legal_values) # store possible cases, undefined whens will have no inputs
# Must refresh when test_param changes
group.test_param.refresh_on_change = True
# And a set of possible cases
for (value, case_inputs_source) in input_source.parse_when_input_sources():
case = ConditionalWhen()
case.value = value
case.inputs = self.parse_input_elem(case_inputs_source, enctypes, context)
group.cases.append(case)
try:
possible_cases.remove(case.value)
except Exception:
log.debug("Tool with id '%s': a when tag has been defined for '%s (%s) --> %s', but does not appear to be selectable." %
(self.id, group.name, group.test_param.name, case.value))
for unspecified_case in possible_cases:
log.warning("Tool with id '%s': a when tag has not been defined for '%s (%s) --> %s', assuming empty inputs." %
(self.id, group.name, group.test_param.name, unspecified_case))
case = ConditionalWhen()
case.value = unspecified_case
case.inputs = {}
group.cases.append(case)
rval[group.name] = group
elif input_type == "section":
group = Section()
group.name = input_source.get("name")
group.title = input_source.get("title")
group.help = input_source.get("help", None)
group.expanded = input_source.get_bool("expanded", False)
page_source = input_source.parse_nested_inputs_source()
group.inputs = self.parse_input_elem(page_source, enctypes, context)
rval[group.name] = group
elif input_type == "upload_dataset":
elem = input_source.elem()
group = UploadDataset()
group.name = elem.get("name")
group.title = elem.get("title")
group.file_type_name = elem.get('file_type_name', group.file_type_name)
group.default_file_type = elem.get('default_file_type', group.default_file_type)
group.metadata_ref = elem.get('metadata_ref', group.metadata_ref)
try:
rval[group.file_type_name].refresh_on_change = True
except KeyError:
pass
group_page_source = XmlPageSource(elem)
group.inputs = self.parse_input_elem(group_page_source, enctypes, context)
rval[group.name] = group
elif input_type == "param":
param = self.parse_param_elem(input_source, enctypes, context)
rval[param.name] = param
if hasattr(param, 'data_ref'):
param.ref_input = context[param.data_ref]
self.input_params.append(param)
return rval
    def parse_param_elem(self, input_source, enctypes, context):
        """
        Parse a single "<param>" element and return a ToolParameter instance.
        Also, if the parameter has a 'required_enctype' add it to the set
        enctypes.
        """
        param = ToolParameter.build(self, input_source)
        param_enctype = param.get_required_enctype()
        if param_enctype:
            enctypes.add(param_enctype)
        # If parameter depends on any other parameters, we must refresh the
        # form when it changes
        for name in param.get_dependencies():
            # Let it throw exception, but give some hint what the problem might be
            if name not in context:
                log.error(f"Tool with id '{self.id}': Could not find dependency '{name}' of parameter '{param.name}'")
            context[name].refresh_on_change = True
        return param
    def populate_resource_parameters(self, tool_source):
        """Inject job-resource parameters from job_config into the tool's <inputs>."""
        root = getattr(tool_source, 'root', None)
        if root is not None and hasattr(self.app, 'job_config') and hasattr(self.app.job_config, 'get_tool_resource_xml'):
            resource_xml = self.app.job_config.get_tool_resource_xml(root.get('id', '').lower(), self.tool_type)
            if resource_xml is not None:
                inputs = root.find('inputs')
                if inputs is None:
                    # Tool declared no inputs; create an empty section to extend.
                    inputs = parse_xml_string('<inputs/>')
                    root.append(inputs)
                inputs.append(resource_xml)
def populate_tool_shed_info(self, tool_shed_repository):
if tool_shed_repository:
self.tool_shed = tool_shed_repository.tool_shed
self.repository_name = tool_shed_repository.name
self.repository_owner = tool_shed_repository.owner
self.changeset_revision = tool_shed_repository.changeset_revision
self.installed_changeset_revision = tool_shed_repository.installed_changeset_revision
self.sharable_url = get_tool_shed_repository_url(
self.app, self.tool_shed, self.repository_owner, self.repository_name
)
    @property
    def legacy_biotools_external_reference(self) -> Optional[str]:
        """Return a bio.tools ID if any of the tool's IDs are in BIOTOOLS_MAPPING."""
        for tool_id in self.all_ids:
            if tool_id in BIOTOOLS_MAPPING:
                return BIOTOOLS_MAPPING[tool_id]
        return None
@property
def biotools_reference(self) -> Optional[str]:
"""Return a bio.tools ID if external reference to it is found.
If multiple bio.tools references are found, return just the first one.
"""
for xref in self.xrefs:
if xref["reftype"] == "bio.tools":
return xref["value"]
return None
    @property
    def help(self):
        """Rendered (Mako) help template; initialized lazily on first access."""
        if self.__help is HELP_UNINITIALIZED:
            self.__ensure_help()
        return self.__help
    @property
    def help_by_page(self):
        """Per-page help templates (legacy multi-page help); built lazily."""
        if self.__help_by_page is HELP_UNINITIALIZED:
            self.__ensure_help()
        return self.__help_by_page
    @property
    def raw_help(self):
        """Unrendered help text straight from the tool source."""
        # may return rst (or Markdown in the future)
        tool_source = self.__help_source
        help_text = tool_source.parse_help()
        return help_text
    def __ensure_help(self):
        # HELP_UNINITIALIZED doubles as a sentinel and a context manager here
        # (used with `with`, so presumably a lock defined outside this view —
        # TODO confirm) to make sure help is initialized at most once.
        with HELP_UNINITIALIZED:
            if self.__help is HELP_UNINITIALIZED:
                self.__inititalize_help()
    def __inititalize_help(self):
        """Parse and render the tool's help into templates.

        Sets ``self.__help`` (single rendered template or None) and
        ``self.__help_by_page`` (legacy multi-page XML help). Called once,
        under the HELP_UNINITIALIZED lock, by ``__ensure_help``.
        """
        tool_source = self.__help_source
        self.__help = None
        self.__help_by_page = []
        help_footer = ""
        help_text = tool_source.parse_help()
        if help_text is not None:
            try:
                # Rewrite image paths for ToolShed-installed tools so help images resolve.
                if help_text.find('.. image:: ') >= 0 and (self.tool_shed_repository or self.repository_id):
                    help_text = set_image_paths(
                        self.app, help_text, encoded_repository_id=self.repository_id, tool_shed_repository=self.tool_shed_repository, tool_id=self.old_id, tool_version=self.version
                    )
            except Exception:
                log.exception("Exception in parse_help, so images may not be properly displayed for tool with id '%s'", self.id)
            try:
                self.__help = Template(rst_to_html(help_text), input_encoding='utf-8',
                                       default_filters=['decode.utf8'],
                                       encoding_errors='replace')
            except Exception:
                log.exception("Exception while parsing help for tool with id '%s'", self.id)
            # Handle deprecated multi-page help text in XML case.
            if hasattr(tool_source, "root"):
                help_elem = tool_source.root.find("help")
                help_header = help_text
                help_pages = help_elem.findall("page")
                # Multiple help page case
                if help_pages:
                    for help_page in help_pages:
                        self.__help_by_page.append(help_page.text)
                        # NOTE(review): a None help_page.tail would raise TypeError here — presumably pages always carry tail text; confirm.
                        help_footer = help_footer + help_page.tail
                    # Each page has to rendered all-together because of backreferences allowed by rst
                    try:
                        self.__help_by_page = [Template(rst_to_html(help_header + x + help_footer),
                                                        input_encoding='utf-8',
                                                        default_filters=['decode.utf8'],
                                                        encoding_errors='replace')
                                               for x in self.__help_by_page]
                    except Exception:
                        log.exception("Exception while parsing multi-page help for tool with id '%s'", self.id)
        # Pad out help pages to match npages ... could this be done better?
        while len(self.__help_by_page) < self.npages:
            self.__help_by_page.append(self.__help)
def find_output_def(self, name):
# name is JobToOutputDatasetAssociation name.
# TODO: to defensive, just throw IndexError and catch somewhere
# up that stack.
if ToolOutputCollectionPart.is_named_collection_part_name(name):
collection_name, part = ToolOutputCollectionPart.split_output_name(name)
collection_def = self.output_collections.get(collection_name, None)
if not collection_def:
return None
return collection_def.outputs.get(part, None)
else:
return self.outputs.get(name, None)
@property
def is_workflow_compatible(self):
is_workflow_compatible = self._is_workflow_compatible
if is_workflow_compatible is None:
is_workflow_compatible = self.check_workflow_compatible(self.tool_source)
if self.finalized:
self._is_workflow_compatible = is_workflow_compatible
return is_workflow_compatible
def check_workflow_compatible(self, tool_source):
"""
Determine if a tool can be used in workflows. External tools and the
upload tool are currently not supported by workflows.
"""
# Multiple page tools are not supported -- we're eliminating most
# of these anyway
if self.finalized and self.has_multiple_pages:
return False
# This is probably the best bet for detecting external web tools
# right now
if self.tool_type.startswith('data_source'):
return False
if hasattr(tool_source, "root"):
root = tool_source.root
if not string_as_bool(root.get("workflow_compatible", "True")):
return False
# TODO: Anyway to capture tools that dynamically change their own
# outputs?
return True
def new_state(self, trans):
"""
Create a new `DefaultToolState` for this tool. It will be initialized
with default values for inputs. Grouping elements are filled in recursively.
"""
state = DefaultToolState()
state.initialize(trans, self)
return state
def get_param(self, key):
"""
Returns the parameter named `key` or None if there is no such
parameter.
"""
return self.inputs.get(key, None)
def get_hook(self, name):
"""
Returns an object from the code file referenced by `code_namespace`
(this will normally be a callable object)
"""
if self.code_namespace:
# Try to look up hook in self.hook_map, otherwise resort to default
if name in self.hook_map and self.hook_map[name] in self.code_namespace:
return self.code_namespace[self.hook_map[name]]
elif name in self.code_namespace:
return self.code_namespace[name]
return None
def visit_inputs(self, values, callback):
"""
Call the function `callback` on each parameter of this tool. Visits
grouping parameters recursively and constructs unique prefixes for
each nested set of The callback method is then called as:
`callback( level_prefix, parameter, parameter_value )`
"""
# HACK: Yet another hack around check_values -- WHY HERE?
if self.check_values:
visit_input_values(self.inputs, values, callback)
def expand_incoming(self, trans, incoming, request_context, input_format='legacy'):
rerun_remap_job_id = None
if 'rerun_remap_job_id' in incoming:
try:
rerun_remap_job_id = trans.app.security.decode_id(incoming['rerun_remap_job_id'])
except Exception as exception:
log.error(str(exception))
raise exceptions.MessageException("Failure executing tool with id '%s' (attempting to rerun invalid job).", self.id)
set_dataset_matcher_factory(request_context, self)
# Fixed set of input parameters may correspond to any number of jobs.
# Expand these out to individual parameters for given jobs (tool executions).
expanded_incomings, collection_info = expand_meta_parameters(trans, self, incoming)
# Remapping a single job to many jobs doesn't make sense, so disable
# remap if multi-runs of tools are being used.
if rerun_remap_job_id and len(expanded_incomings) > 1:
raise exceptions.MessageException(
"Failure executing tool with id '%s' (cannot create multiple jobs when remapping existing job).", self.id)
# Process incoming data
validation_timer = self.app.execution_timer_factory.get_timer(
'internals.galaxy.tools.validation',
'Validated and populated state for tool request',
)
all_errors = []
all_params = []
for expanded_incoming in expanded_incomings:
params = {}
errors = {}
if self.input_translator:
self.input_translator.translate(expanded_incoming)
if not self.check_values:
# If `self.check_values` is false we don't do any checking or
# processing on input This is used to pass raw values
# through to/from external sites.
params = expanded_incoming
else:
# Update state for all inputs on the current page taking new
# values from `incoming`.
populate_state(request_context, self.inputs, expanded_incoming, params, errors, simple_errors=False, input_format=input_format)
# If the tool provides a `validate_input` hook, call it.
validate_input = self.get_hook('validate_input')
if validate_input:
validate_input(request_context, errors, params, self.inputs)
all_errors.append(errors)
all_params.append(params)
unset_dataset_matcher_factory(request_context)
log.info(validation_timer)
return all_params, all_errors, rerun_remap_job_id, collection_info
    def handle_input(self, trans, incoming, history=None, use_cached_job=False, input_format='legacy'):
        """
        Process incoming parameters for this tool from the dict `incoming`,
        update the tool state (or create if none existed), and either return
        to the form or execute the tool (only if 'execute' was clicked and
        there were no errors).

        Returns a dict describing submitted jobs, output datasets and
        collections; raises on validation failure or when no job could be
        submitted.
        """
        request_context = proxy_work_context_for_history(trans, history=history)
        all_params, all_errors, rerun_remap_job_id, collection_info = self.expand_incoming(trans=trans, incoming=incoming, request_context=request_context, input_format=input_format)
        # If there were errors, we stay on the same page and display them
        if any(all_errors):
            # simple param_key -> message string for tool form.
            err_data = {key: unicodify(value) for d in all_errors for (key, value) in d.items()}
            param_errors = {}
            for d in all_errors:
                for key, value in d.items():
                    # Preserve structured error information when the error object supports it.
                    if hasattr(value, 'to_dict'):
                        value_obj = value.to_dict()
                    else:
                        value_obj = {"message": unicodify(value)}
                    param_errors[key] = value_obj
            raise exceptions.RequestParameterInvalidException(', '.join(msg for msg in err_data.values()), err_data=err_data, param_errors=param_errors)
        else:
            mapping_params = MappingParameters(incoming, all_params)
            completed_jobs = {}
            # When job caching is enabled, look for an equivalent previous job per expanded param set.
            for i, param in enumerate(all_params):
                if use_cached_job:
                    completed_jobs[i] = self.job_search.by_tool_input(
                        trans=trans,
                        tool_id=self.id,
                        tool_version=self.version,
                        param=param,
                        param_dump=self.params_to_strings(param, self.app, nested=True),
                        job_state=None,
                    )
                else:
                    completed_jobs[i] = None
            execution_tracker = execute_job(trans, self, mapping_params, history=request_context.history, rerun_remap_job_id=rerun_remap_job_id, collection_info=collection_info, completed_jobs=completed_jobs)
            # Raise an exception if there were jobs to execute and none of them were submitted,
            # if at least one is submitted or there are no jobs to execute - return aggregate
            # information including per-job errors. Arguably we should just always return the
            # aggregate information - we just haven't done that historically.
            raise_execution_exception = not execution_tracker.successful_jobs and len(all_params) > 0
            if raise_execution_exception:
                raise exceptions.MessageException(execution_tracker.execution_errors[0])
            return dict(out_data=execution_tracker.output_datasets,
                        num_jobs=len(execution_tracker.successful_jobs),
                        job_errors=execution_tracker.execution_errors,
                        jobs=execution_tracker.successful_jobs,
                        output_collections=execution_tracker.output_collections,
                        implicit_collections=execution_tracker.implicit_collections)
    def handle_single_execution(self, trans, rerun_remap_job_id, execution_slice, history, execution_cache=None, completed_job=None, collection_info=None, job_callback=None, flush_job=True):
        """
        Return a pair with whether execution is successful as well as either
        resulting output data or an error message indicating the problem.

        On success the pair is ``(job, list_of_output_items)``; on failure it is
        ``(False, message_or_exception)``.
        """
        try:
            rval = self.execute(
                trans,
                incoming=execution_slice.param_combination,
                history=history,
                rerun_remap_job_id=rerun_remap_job_id,
                execution_cache=execution_cache,
                dataset_collection_elements=execution_slice.dataset_collection_elements,
                completed_job=completed_job,
                collection_info=collection_info,
                job_callback=job_callback,
                flush_job=flush_job,
            )
            job = rval[0]
            out_data = rval[1]
            # An optional third element carries an updated history for the slice.
            if len(rval) > 2:
                execution_slice.history = rval[2]
        except (webob.exc.HTTPFound, exceptions.MessageException) as e:
            # if it's a webob redirect exception, pass it up the stack
            raise e
        except ToolInputsNotReadyException as e:
            # Inputs not ready yet: surface the exception object itself as the "error".
            return False, e
        except Exception as e:
            log.exception("Exception caught while attempting to execute tool with id '%s':", self.id)
            message = f"Error executing tool with id '{self.id}': {unicodify(e)}"
            return False, message
        if isinstance(out_data, dict):
            return job, list(out_data.items())
        else:
            if isinstance(out_data, str):
                message = out_data
            else:
                message = f"Failure executing tool with id '{self.id}' (invalid data returned from tool execution)"
            return False, message
def find_fieldstorage(self, x):
if isinstance(x, cgi_FieldStorage):
raise InterruptedUpload(None)
elif isinstance(x, dict):
[self.find_fieldstorage(y) for y in x.values()]
elif isinstance(x, list):
[self.find_fieldstorage(y) for y in x]
@property
def params_with_missing_data_table_entry(self):
"""
Return all parameters that are dynamically generated select lists whose
options require an entry not currently in the tool_data_table_conf.xml file.
"""
params = []
for input_param in self.input_params:
if isinstance(input_param, SelectToolParameter) and input_param.is_dynamic:
options = input_param.options
if options and options.missing_tool_data_table_name and input_param not in params:
params.append(input_param)
return params
@property
def params_with_missing_index_file(self):
"""
Return all parameters that are dynamically generated
select lists whose options refer to a missing .loc file.
"""
params = []
for input_param in self.input_params:
if isinstance(input_param, SelectToolParameter) and input_param.is_dynamic:
options = input_param.options
if options and options.tool_data_table and options.tool_data_table.missing_index_file and input_param not in params:
params.append(input_param)
return params
def get_static_param_values(self, trans):
"""
Returns a map of parameter names and values if the tool does not
require any user input. Will raise an exception if any parameter
does require input.
"""
args = dict()
for key, param in self.inputs.items():
# BaseURLToolParameter is now a subclass of HiddenToolParameter, so
# we must check if param is a BaseURLToolParameter first
if isinstance(param, BaseURLToolParameter):
args[key] = param.get_initial_value(trans, None)
elif isinstance(param, HiddenToolParameter):
args[key] = model.User.expand_user_properties(trans.user, param.value)
else:
raise Exception("Unexpected parameter type")
return args
    def execute(self, trans, incoming=None, set_output_hid=True, history=None, **kwargs):
        """
        Execute the tool using parameter values in `incoming`. This just
        dispatches to the `ToolAction` instance specified by
        `self.tool_action`. In general this will create a `Job` that
        when run will build the tool's outputs, e.g. `DefaultToolAction`.
        """
        if incoming is None:
            incoming = {}
        try:
            return self.tool_action.execute(self, trans, incoming=incoming, set_output_hid=set_output_hid, history=history, **kwargs)
        except exceptions.ToolExecutionError as exc:
            job = exc.job
            job_id = 'unknown'
            if job is not None:
                # Record the failure on the job before re-raising so the error
                # is reflected in the job's state/info.
                job.mark_failed(info=exc.err_msg, blurb=exc.err_code.default_error_message)
                job_id = job.id
            log.error("Tool execution failed for job: %s", job_id)
            raise
    def params_to_strings(self, params, app, nested=False):
        # Thin delegation: serialize this tool's parameter values via the
        # module-level params_to_strings helper over self.inputs.
        return params_to_strings(self.inputs, params, app, nested)
    def params_from_strings(self, params, app, ignore_errors=False):
        # Thin delegation: deserialize parameter values via the module-level
        # params_from_strings helper over self.inputs.
        return params_from_strings(self.inputs, params, app, ignore_errors)
    def check_and_update_param_values(self, values, trans, update_values=True, workflow_building_mode=False):
        """
        Check that all parameters have values, and fill in with default
        values where necessary. This could be called after loading values
        from a database in case new parameters have been added.

        Returns a dict mapping prefixed parameter names to warning/error
        messages; ``values`` is updated in place when ``update_values`` is set.
        """
        messages = {}
        request_context = proxy_work_context_for_history(trans, workflow_building_mode=workflow_building_mode)
        def validate_inputs(input, value, error, parent, context, prefixed_name, prefixed_label, **kwargs):
            # Re-validate each leaf value; on failure optionally reset it to the
            # parameter's initial value (params with a data_ref are skipped —
            # their defaults depend on another input and can't be safely replaced).
            if not error:
                value, error = check_param(request_context, input, value, context)
            if error:
                if update_values and not hasattr(input, 'data_ref'):
                    try:
                        previous_value = value
                        value = input.get_initial_value(request_context, context)
                        # Internal (double-underscore) params are replaced silently.
                        if not prefixed_name.startswith('__'):
                            messages[prefixed_name] = error if previous_value == value else f'{error} Using default: \'{value}\'.'
                        parent[input.name] = value
                    except Exception:
                        messages[prefixed_name] = 'Attempt to replace invalid value for \'%s\' failed.' % (prefixed_label)
                else:
                    messages[prefixed_name] = error
        visit_input_values(self.inputs, values, validate_inputs)
        return messages
def build_dependency_cache(self, **kwds):
if isinstance(self.app.toolbox.dependency_manager, CachedDependencyManager):
self.app.toolbox.dependency_manager.build_cache(
requirements=self.requirements,
installed_tool_dependencies=self.installed_tool_dependencies,
tool_dir=self.tool_dir,
job_directory=None,
metadata=False,
tool_instance=self,
**kwds
)
def build_dependency_shell_commands(self, job_directory=None, metadata=False):
"""
Return a list of commands to be run to populate the current environment to include this tools requirements.
"""
return self.app.toolbox.dependency_manager.dependency_shell_commands(
requirements=self.requirements,
installed_tool_dependencies=self.installed_tool_dependencies,
tool_dir=self.tool_dir,
job_directory=job_directory,
preserve_python_environment=self.requires_galaxy_python_environment,
metadata=metadata,
tool_instance=self
)
@property
def installed_tool_dependencies(self):
if self.tool_shed_repository:
installed_tool_dependencies = self.tool_shed_repository.tool_dependencies_installed_or_in_error
else:
installed_tool_dependencies = None
return installed_tool_dependencies
    @property
    def tool_requirements(self):
        """
        Return all requirements of type package
        """
        return self.requirements.packages
@property
def tool_requirements_status(self):
"""
Return a list of dictionaries for all tool dependencies with their associated status
"""
return self._view.get_requirements_status({self.id: self.tool_requirements}, self.installed_tool_dependencies)
@property
def output_discover_patterns(self):
# patterns to collect for remote job execution
patterns = []
for output in self.outputs.values():
patterns.extend(output.output_discover_patterns)
return patterns
def build_redirect_url_params(self, param_dict):
"""
Substitute parameter values into self.redirect_url_params
"""
if not self.redirect_url_params:
return
redirect_url_params = None
# Substituting parameter values into the url params
redirect_url_params = fill_template(self.redirect_url_params, context=param_dict)
# Remove newlines
redirect_url_params = redirect_url_params.replace("\n", " ").replace("\r", " ")
return redirect_url_params
def parse_redirect_url(self, data, param_dict):
"""
Parse the REDIRECT_URL tool param. Tools that send data to an external
application via a redirect must include the following 3 tool params:
1) REDIRECT_URL - the url to which the data is being sent
2) DATA_URL - the url to which the receiving application will send an
http post to retrieve the Galaxy data
3) GALAXY_URL - the url to which the external application may post
data as a response
"""
redirect_url = param_dict.get('REDIRECT_URL')
redirect_url_params = self.build_redirect_url_params(param_dict)
# Add the parameters to the redirect url. We're splitting the param
# string on '**^**' because the self.parse() method replaced white
# space with that separator.
params = redirect_url_params.split('**^**')
rup_dict = {}
for param in params:
p_list = param.split('=')
p_name = p_list[0]
p_val = p_list[1]
rup_dict[p_name] = p_val
DATA_URL = param_dict.get('DATA_URL', None)
assert DATA_URL is not None, "DATA_URL parameter missing in tool config."
DATA_URL += f"/{str(data.id)}/display"
redirect_url += f"?DATA_URL={DATA_URL}"
# Add the redirect_url_params to redirect_url
for p_name in rup_dict:
redirect_url += f"&{p_name}={rup_dict[p_name]}"
# Add the current user email to redirect_url
if data.history.user:
USERNAME = str(data.history.user.email)
else:
USERNAME = 'Anonymous'
redirect_url += f"&USERNAME={USERNAME}"
return redirect_url
    def call_hook(self, hook_name, *args, **kwargs):
        """
        Call the custom code hook function identified by 'hook_name' if any,
        and return the results
        """
        try:
            code = self.get_hook(hook_name)
            if code:
                return code(*args, **kwargs)
        except Exception as e:
            original_message = ''
            if len(e.args):
                original_message = e.args[0]
            # Rewrap the exception message with tool/hook context while
            # preserving the original exception type, then re-raise.
            e.args = (f"Error in '{self.name}' hook '{hook_name}', original message: {original_message}", )
            raise
    def exec_before_job(self, app, inp_data, out_data, param_dict=None):
        """Extension hook run before a job; default implementation does nothing."""
        pass
    def exec_after_process(self, app, inp_data, out_data, param_dict, job=None, **kwds):
        """Extension hook run after a job's process completes; default implementation does nothing."""
        pass
    def job_failed(self, job_wrapper, message, exception=False):
        """
        Called when a job has failed; default implementation does nothing.
        """
    def discover_outputs(self, out_data, out_collections, tool_provided_metadata, tool_working_directory, job, input_ext, input_dbkey, inp_data=None, final_job_state='ok'):
        """
        Find any additional datasets generated by a tool and attach (for
        cases where number of outputs is not known in advance).

        Builds an output_collect.JobContext from the job's inputs/permissions
        and runs both primary-dataset and dynamic-collection discovery.
        """
        # given the job_execution import is the only one, probably makes sense to refactor this out
        # into job_wrapper.
        tool = self
        permission_provider = output_collect.PermissionProvider(inp_data, tool.app.security_agent, job)
        metadata_source_provider = output_collect.MetadataSourceProvider(inp_data)
        job_context = output_collect.JobContext(
            tool,
            tool_provided_metadata,
            job,
            tool_working_directory,
            permission_provider,
            metadata_source_provider,
            input_dbkey,
            object_store=tool.app.object_store,
            final_job_state=final_job_state,
            flush_per_n_datasets=tool.app.config.flush_per_n_datasets,
        )
        collected = output_collect.collect_primary_datasets(
            job_context,
            out_data,
            input_ext,
        )
        output_collect.collect_dynamic_outputs(
            job_context,
            out_collections,
        )
        # Return value only used in unit tests. Probably should be returning number of collected
        # bytes instead?
        return collected
def to_archive(self):
tool = self
tarball_files = []
temp_files = []
with open(os.path.abspath(tool.config_file)) as fh:
tool_xml = fh.read()
# Retrieve tool help images and rewrite the tool's xml into a temporary file with the path
# modified to be relative to the repository root.
image_found = False
if tool.help is not None:
tool_help = tool.help._source
# Check each line of the rendered tool help for an image tag that points to a location under static/
for help_line in tool_help.split('\n'):
image_regex = re.compile(r'img alt="[^"]+" src="\${static_path}/([^"]+)"')
matches = re.search(image_regex, help_line)
if matches is not None:
tool_help_image = matches.group(1)
tarball_path = tool_help_image
filesystem_path = os.path.abspath(os.path.join(self.app.config.root, 'static', tool_help_image))
if os.path.exists(filesystem_path):
tarball_files.append((filesystem_path, tarball_path))
image_found = True
tool_xml = tool_xml.replace('${static_path}/%s' % tarball_path, tarball_path)
# If one or more tool help images were found, add the modified tool XML to the tarball instead of the original.
if image_found:
with tempfile.NamedTemporaryFile(mode='w', suffix='.xml', delete=False) as fh:
new_tool_config = fh.name
fh.write(tool_xml)
tool_tup = (new_tool_config, os.path.split(tool.config_file)[-1])
temp_files.append(new_tool_config)
else:
tool_tup = (os.path.abspath(tool.config_file), os.path.split(tool.config_file)[-1])
tarball_files.append(tool_tup)
# TODO: This feels hacky.
tool_command = tool.command.strip().split()[0]
tool_path = os.path.dirname(os.path.abspath(tool.config_file))
# Add the tool XML to the tuple that will be used to populate the tarball.
if os.path.exists(os.path.join(tool_path, tool_command)):
tarball_files.append((os.path.join(tool_path, tool_command), tool_command))
# Find and add macros and code files.
for external_file in tool.get_externally_referenced_paths(os.path.abspath(tool.config_file)):
external_file_abspath = os.path.abspath(os.path.join(tool_path, external_file))
tarball_files.append((external_file_abspath, external_file))
if os.path.exists(os.path.join(tool_path, "Dockerfile")):
tarball_files.append((os.path.join(tool_path, "Dockerfile"), "Dockerfile"))
# Find tests, and check them for test data.
tests = tool.tests
if tests is not None:
for test in tests:
# Add input file tuples to the list.
for input in test.inputs:
for input_value in test.inputs[input]:
input_filename = str(input_value)
input_path = os.path.abspath(os.path.join('test-data', input_filename))
if os.path.exists(input_path):
td_tup = (input_path, os.path.join('test-data', input_filename))
tarball_files.append(td_tup)
# And add output file tuples to the list.
for _, filename, _ in test.outputs:
output_filepath = os.path.abspath(os.path.join('test-data', filename))
if os.path.exists(output_filepath):
td_tup = (output_filepath, os.path.join('test-data', filename))
tarball_files.append(td_tup)
for param in tool.input_params:
# Check for tool data table definitions.
if hasattr(param, 'options'):
if hasattr(param.options, 'tool_data_table'):
data_table = param.options.tool_data_table
if hasattr(data_table, 'filenames'):
data_table_definitions = []
for data_table_filename in data_table.filenames:
# FIXME: from_shed_config seems to always be False.
if not data_table.filenames[data_table_filename]['from_shed_config']:
tar_file = f"{data_table.filenames[data_table_filename]['filename']}.sample"
sample_file = os.path.join(data_table.filenames[data_table_filename]['tool_data_path'],
tar_file)
# Use the .sample file, if one exists. If not, skip this data table.
if os.path.exists(sample_file):
tarfile_path, tarfile_name = os.path.split(tar_file)
tarfile_path = os.path.join('tool-data', tarfile_name)
tarball_files.append((sample_file, tarfile_path))
data_table_definitions.append(data_table.xml_string)
if len(data_table_definitions) > 0:
# Put the data table definition XML in a temporary file.
table_definition = '<?xml version="1.0" encoding="utf-8"?>\n<tables>\n %s</tables>'
table_definition = table_definition % '\n'.join(data_table_definitions)
with tempfile.NamedTemporaryFile(mode='w', delete=False) as fh:
table_conf = fh.name
fh.write(table_definition)
tarball_files.append((table_conf, os.path.join('tool-data', 'tool_data_table_conf.xml.sample')))
temp_files.append(table_conf)
# Create the tarball.
with tempfile.NamedTemporaryFile(suffix='.tgz', delete=False) as fh:
tarball_archive = fh.name
tarball = tarfile.open(name=tarball_archive, mode='w:gz')
# Add the files from the previously generated list.
for fspath, tarpath in tarball_files:
tarball.add(fspath, arcname=tarpath)
tarball.close()
# Delete any temporary files that were generated.
for temp_file in temp_files:
os.remove(temp_file)
return tarball_archive
    def to_dict(self, trans, link_details=False, io_details=False, tool_help=False):
        """ Returns dict of tool. """
        # Basic information
        tool_dict = super().to_dict()
        tool_dict["edam_operations"] = self.edam_operations
        tool_dict["edam_topics"] = self.edam_topics
        tool_dict["hidden"] = self.hidden
        tool_dict["is_workflow_compatible"] = self.is_workflow_compatible
        tool_dict["xrefs"] = self.xrefs
        # Fill in ToolShedRepository info
        if hasattr(self, 'tool_shed') and self.tool_shed:
            tool_dict['tool_shed_repository'] = {
                'name': self.repository_name,
                'owner': self.repository_owner,
                'changeset_revision': self.changeset_revision,
                'tool_shed': self.tool_shed
            }
        # If an admin user, expose the path to the actual tool config XML file.
        if trans.user_is_admin:
            config_file = None if not self.config_file else os.path.abspath(self.config_file)
            tool_dict['config_file'] = config_file
        # Add link details.
        if link_details:
            # Add details for creating a hyperlink to the tool.
            if not isinstance(self, DataSourceTool):
                link = self.app.url_for(controller='tool_runner', tool_id=self.id)
            else:
                # Data source tools go through a redirect action instead.
                link = self.app.url_for(controller='tool_runner', action='data_source_redirect', tool_id=self.id)
            # Basic information
            tool_dict.update({'link': link,
                              'min_width': self.uihints.get('minwidth', -1),
                              'target': self.target})
        # Add input and output details.
        if io_details:
            tool_dict['inputs'] = [input.to_dict(trans) for input in self.inputs.values()]
            tool_dict['outputs'] = [output.to_dict(app=self.app) for output in self.outputs.values()]
        tool_dict['panel_section_id'], tool_dict['panel_section_name'] = self.get_panel_section()
        tool_class = self.__class__
        # FIXME: the Tool class should declare directly, instead of ad hoc inspection
        regular_form = tool_class == Tool or isinstance(self, (DatabaseOperationTool, InteractiveTool))
        tool_dict["form_style"] = "regular" if regular_form else "special"
        if tool_help:
            # create tool help
            help_txt = ''
            if self.help:
                help_txt = self.help.render(static_path=self.app.url_for('/static'), host_url=self.app.url_for('/', qualified=True))
                help_txt = unicodify(help_txt)
            tool_dict['help'] = help_txt
        return tool_dict
    def to_json(self, trans, kwd=None, job=None, workflow_building_mode=False, history=None):
        """
        Recursively creates a tool dictionary containing repeats, dynamic options and updated states.

        Used by the client tool form builder; when ``job`` is given, parameter
        state is pre-populated from that job's recorded values.
        """
        if kwd is None:
            kwd = {}
        if workflow_building_mode is workflow_building_modes.USE_HISTORY or workflow_building_mode is workflow_building_modes.DISABLED:
            # We don't need a history when exporting a workflow for the workflow editor or when downloading a workflow
            history = history or trans.get_history()
            if history is None and job is not None:
                history = self.history_manager.get_owned(job.history.id, trans.user, current_history=trans.history)
            if history is None:
                raise exceptions.MessageException('History unavailable. Please specify a valid history id')
        # build request context
        request_context = proxy_work_context_for_history(trans, history, workflow_building_mode=workflow_building_mode)
        # load job parameters into incoming
        tool_message = ''
        tool_warnings = ''
        if job:
            try:
                job_params = job.get_param_values(self.app, ignore_errors=True)
                tool_warnings = self.check_and_update_param_values(job_params, request_context, update_values=True)
                # Remap the job's dataset parameters onto equivalents in the current history.
                self._map_source_to_history(request_context, self.inputs, job_params)
                tool_message = self._compare_tool_version(job)
                params_to_incoming(kwd, self.inputs, job_params, self.app)
            except Exception as e:
                raise exceptions.MessageException(unicodify(e))
        # create parameter object
        params = Params(kwd, sanitize=False)
        # expand incoming parameters (parameters might trigger multiple tool executions,
        # here we select the first execution only in order to resolve dynamic parameters)
        expanded_incomings, _ = expand_meta_parameters(trans, self, params.__dict__)
        if expanded_incomings:
            params.__dict__ = expanded_incomings[0]
        # do param translation here, used by datasource tools
        if self.input_translator:
            self.input_translator.translate(params)
        set_dataset_matcher_factory(request_context, self)
        # create tool state
        state_inputs = {}
        state_errors = {}
        populate_state(request_context, self.inputs, params.__dict__, state_inputs, state_errors)
        # create tool model
        tool_model = self.to_dict(request_context)
        tool_model['inputs'] = []
        self.populate_model(request_context, self.inputs, state_inputs, tool_model['inputs'])
        unset_dataset_matcher_factory(request_context)
        # create tool help
        tool_help = ''
        if self.help:
            tool_help = self.help.render(static_path=self.app.url_for('/static'), host_url=self.app.url_for('/', qualified=True))
            tool_help = unicodify(tool_help, 'utf-8')
        if isinstance(self.action, tuple):
            # A tuple action is (prefix, path-to-resolve).
            action = self.action[0] + self.app.url_for(self.action[1])
        else:
            action = self.app.url_for(self.action)
        # update tool model
        tool_model.update({
            'id': self.id,
            'help': tool_help,
            'citations': bool(self.citations),
            'sharable_url': self.sharable_url,
            'message': tool_message,
            'warnings': tool_warnings,
            'versions': self.tool_versions,
            'requirements': [{'name': r.name, 'version': r.version} for r in self.requirements],
            'errors': state_errors,
            'tool_errors': self.tool_errors,
            'state_inputs': params_to_strings(self.inputs, state_inputs, self.app, use_security=True, nested=True),
            'job_id': trans.security.encode_id(job.id) if job else None,
            'job_remap': job.remappable() if job else None,
            'history_id': trans.security.encode_id(history.id) if history else None,
            'display': self.display_interface,
            'action': action,
            'license': self.license,
            'creator': self.creator,
            'method': self.method,
            'enctype': self.enctype
        })
        return tool_model
    def populate_model(self, request_context, inputs, state_inputs, group_inputs, other_values=None):
        """
        Populates the tool model consumed by the client form builder.

        Recursively converts each input (repeat/conditional/section/leaf) to a
        dict and appends it to ``group_inputs`` in order.
        """
        other_values = ExpressionContext(state_inputs, other_values)
        for input_index, input in enumerate(inputs.values()):
            tool_dict = None
            group_state = state_inputs.get(input.name, {})
            if input.type == 'repeat':
                tool_dict = input.to_dict(request_context)
                # One cache entry per repeat instance, each recursively populated.
                group_cache = tool_dict['cache'] = {}
                for i in range(len(group_state)):
                    group_cache[i] = []
                    self.populate_model(request_context, input.inputs, group_state[i], group_cache[i], other_values)
            elif input.type == 'conditional':
                tool_dict = input.to_dict(request_context)
                if 'test_param' in tool_dict:
                    test_param = tool_dict['test_param']
                    test_param['value'] = input.test_param.value_to_basic(group_state.get(test_param['name'], input.test_param.get_initial_value(request_context, other_values)), self.app)
                    test_param['text_value'] = input.test_param.value_to_display_text(test_param['value'])
                    # Only the currently-selected case receives the saved state.
                    for i in range(len(tool_dict['cases'])):
                        current_state = {}
                        if i == group_state.get('__current_case__'):
                            current_state = group_state
                        self.populate_model(request_context, input.cases[i].inputs, current_state, tool_dict['cases'][i]['inputs'], other_values)
            elif input.type == 'section':
                tool_dict = input.to_dict(request_context)
                self.populate_model(request_context, input.inputs, group_state, tool_dict['inputs'], other_values)
            else:
                try:
                    initial_value = input.get_initial_value(request_context, other_values)
                    tool_dict = input.to_dict(request_context, other_values=other_values)
                    tool_dict['value'] = input.value_to_basic(state_inputs.get(input.name, initial_value), self.app, use_security=True)
                    tool_dict['default_value'] = input.value_to_basic(initial_value, self.app, use_security=True)
                    tool_dict['text_value'] = input.value_to_display_text(tool_dict['value'])
                except ImplicitConversionRequired:
                    tool_dict = input.to_dict(request_context)
                    # This hack leads client to display a text field
                    tool_dict['textable'] = True
                except Exception:
                    tool_dict = input.to_dict(request_context)
                    log.exception("tools::to_json() - Skipping parameter expansion '%s'", input.name)
            if input_index >= len(group_inputs):
                group_inputs.append(tool_dict)
            else:
                group_inputs[input_index] = tool_dict
    def _map_source_to_history(self, trans, tool_inputs, params):
        """Remap dataset/collection parameters from a prior job onto the
        current history.

        Job parameters point to the original dataset used; after remapping,
        each parameter refers to the analogous dataset (matched by hid and
        backing dataset/collection id) in ``trans.history``. ``params`` is
        mutated in place via ``visit_input_values``.
        """
        # Need to remap dataset parameters. Job parameters point to original
        # dataset used; parameter should be the analogous dataset in the
        # current history.
        history = trans.history
        # Create index for hdas: keyed both by '<hid>_<dataset id>' and by the
        # bare dataset id so either form of reference can be resolved.
        hda_source_dict = {}
        for hda in history.datasets:
            key = f'{hda.hid}_{hda.dataset.id}'
            hda_source_dict[hda.dataset.id] = hda_source_dict[key] = hda
        # Ditto for dataset collections.
        hdca_source_dict = {}
        for hdca in history.dataset_collections:
            key = f'{hdca.hid}_{hdca.collection.id}'
            hdca_source_dict[hdca.collection.id] = hdca_source_dict[key] = hdca
        # Map dataset or collection to current history
        def map_to_history(value):
            # Returns the matching item in the current history, or None when
            # the value is neither an HDA nor an HDCA or has no counterpart.
            id = None
            source = None
            if isinstance(value, self.app.model.HistoryDatasetAssociation):
                id = value.dataset.id
                source = hda_source_dict
            elif isinstance(value, self.app.model.HistoryDatasetCollectionAssociation):
                id = value.collection.id
                source = hdca_source_dict
            else:
                return None
            # Prefer the exact hid+id match; fall back to the id-only match.
            key = f'{value.hid}_{id}'
            if key in source:
                return source[key]
            elif id in source:
                return source[id]
            else:
                return None
        def mapping_callback(input, value, **kwargs):
            # Only data and data-collection parameters are remapped; other
            # parameter types return None (left unchanged by the visitor).
            if isinstance(input, DataToolParameter):
                if isinstance(value, list):
                    # Multi-input: remap each entry, keeping originals that
                    # have no counterpart in the current history.
                    values = []
                    for val in value:
                        new_val = map_to_history(val)
                        if new_val:
                            values.append(new_val)
                        else:
                            values.append(val)
                    return values
                else:
                    return map_to_history(value)
            elif isinstance(input, DataCollectionToolParameter):
                return map_to_history(value)
        visit_input_values(tool_inputs, params, mapping_callback)
    def _compare_tool_version(self, job):
        """
        Compares a tool version with the tool version from a job (from ToolRunner).

        Returns a (possibly empty) HTML message describing whether the job's
        original tool/version is still available and what re-run options the
        user has. Raises ``exceptions.MessageException`` when the original
        tool cannot be located at all.
        """
        tool_id = job.tool_id
        tool_version = job.tool_version
        message = ''
        try:
            select_field, tools, tool = self.app.toolbox.get_tool_components(tool_id, tool_version=tool_version, get_loaded_tools_by_lineage=False, set_selected=True)
            if tool is None:
                raise exceptions.MessageException('This dataset was created by an obsolete tool (%s). Can\'t re-run.' % tool_id)
            # Only build a message when the job's tool id or version differs
            # from this tool.
            if (self.id != tool_id and self.old_id != tool_id) or self.version != tool_version:
                if self.id == tool_id:
                    # Same tool id, different (unavailable) version.
                    if tool_version:
                        message = f'This job was run with tool version "{tool_version}", which is not available. '
                        if len(tools) > 1:
                            message += 'You can re-run the job with the selected tool or choose another version of the tool. '
                        else:
                            message += 'You can re-run the job with this tool version, which is a different version of the original tool. '
                else:
                    # Different tool id: link to both the old and new tool shed
                    # entries so the user can inspect the derivation.
                    new_tool_shed_url = f'{tool.sharable_url}/{tool.changeset_revision}/'
                    old_tool_shed_url = get_tool_shed_url_from_tool_shed_registry(self.app, tool_id.split('/repos/')[0])
                    old_tool_shed_url = f'{old_tool_shed_url}/view/{tool.repository_owner}/{tool.repository_name}/'
                    message = f'This job was run with <a href=\"{old_tool_shed_url}\" target=\"_blank\">tool id \"{tool_id}\"</a>, version "{tool_version}", which is not available. '
                    if len(tools) > 1:
                        message += f'You can re-run the job with the selected <a href=\"{new_tool_shed_url}\" target=\"_blank\">tool id \"{self.id}\"</a> or choose another derivation of the tool. '
                    else:
                        message += f'You can re-run the job with <a href=\"{new_tool_shed_url}\" target=\"_blank\">tool id \"{self.id}\"</a>, which is a derivation of the original tool. '
            if not self.is_latest_version:
                message += 'There is a newer version of this tool available.'
        except Exception as e:
            raise exceptions.MessageException(unicodify(e))
        return message
def get_default_history_by_trans(self, trans, create=False):
return trans.get_history(create=create)
@classmethod
def get_externally_referenced_paths(self, path):
""" Return relative paths to externally referenced files by the tool
described by file at `path`. External components should not assume things
about the structure of tool xml files (this is the tool's responsibility).
"""
tree = raw_tool_xml_tree(path)
root = tree.getroot()
external_paths = []
for code_elem in root.findall('code'):
external_path = code_elem.get('file')
if external_path:
external_paths.append(external_path)
external_paths.extend(imported_macro_paths(root))
# May also need to load external citation files as well at some point.
return external_paths
class OutputParameterJSONTool(Tool):
    """
    Alternate implementation of Tool that provides parameters and other values
    JSONified within the contents of an output dataset
    """
    tool_type = 'output_parameter_json'

    def _prepare_json_value(self, value):
        """Recursively convert a parameter value into a JSON-safe structure
        where every leaf is stringified."""
        if isinstance(value, dict):
            return self._prepare_json_param_dict(value)
        if isinstance(value, list):
            return self._prepare_json_list(value)
        return str(value)

    def _prepare_json_list(self, param_list):
        """Return a copy of `param_list` with every leaf value stringified."""
        return [self._prepare_json_value(value) for value in param_list]

    def _prepare_json_param_dict(self, param_dict):
        """Return a copy of `param_dict` with every leaf value stringified."""
        return {key: self._prepare_json_value(value) for key, value in param_dict.items()}

    def exec_before_job(self, app, inp_data, out_data, param_dict=None):
        """Before the job runs, write a JSON description of the parameters,
        the output datasets, and some job configuration values into the first
        output dataset's file.
        """
        if param_dict is None:
            param_dict = {}
        # it would probably be better to store the original incoming parameters
        # here, instead of the Galaxy modified ones?
        json_params = {
            'param_dict': self._prepare_json_param_dict(param_dict),
            'output_data': [],
            'job_config': dict(
                GALAXY_DATATYPES_CONF_FILE=param_dict.get('GALAXY_DATATYPES_CONF_FILE'),
                GALAXY_ROOT_DIR=param_dict.get('GALAXY_ROOT_DIR'),
                TOOL_PROVIDED_JOB_METADATA_FILE=self.provided_metadata_file,
            ),
        }
        json_filename = None
        for out_name, data in out_data.items():
            # use wrapped dataset to access certain values
            wrapped_data = param_dict.get(out_name)
            # allow multiple files to be created
            file_name = str(wrapped_data)
            json_params['output_data'].append(dict(
                out_data_name=out_name,
                ext=data.ext,
                dataset_id=data.dataset.id,
                hda_id=data.id,
                file_name=file_name,
                extra_files_path=str(wrapped_data.files_path),
            ))
            if json_filename is None:
                # The JSON payload lands in the first output's file.
                json_filename = file_name
        with open(json_filename, 'w') as out:
            out.write(json.dumps(json_params))
class ExpressionTool(Tool):
    """Tool whose body is an expression evaluated (by an external JS runtime
    script) over a JSON serialization of the job inputs.
    """
    requires_js_runtime = True
    tool_type = 'expression'
    tool_type_local = True
    EXPRESSION_INPUTS_NAME = "_expression_inputs_.json"

    def parse_command(self, tool_source):
        """Install the fixed expression-evaluation command instead of parsing
        a <command> block; stores the expression text for exec_before_job."""
        self.command = f"cd ../; {expressions.EXPRESSION_SCRIPT_CALL}"
        self.interpreter = None
        self._expression = tool_source.parse_expression().strip()

    def parse_outputs(self, tool_source):
        """Parse outputs, then validate them for expression tools."""
        # Setup self.outputs and self.output_collections
        super().parse_outputs(tool_source)
        # Validate these outputs for expression tools.
        if len(self.output_collections) != 0:
            message = "Expression tools may not declare output collections at this time."
            raise Exception(message)
        for output in self.outputs.values():
            if not hasattr(output, "from_expression"):
                message = "Expression tools may not declare output datasets at this time."
                raise Exception(message)

    def exec_before_job(self, app, inp_data, out_data, param_dict=None):
        """Serialize the expression, its inputs, and the output paths to the
        expression-inputs JSON file in the local working directory.

        :raises Exception: if ``param_dict`` was not provided (everything here
            is derived from it).
        """
        super().exec_before_job(app, inp_data, out_data, param_dict=param_dict)
        # Fix: validate param_dict *before* subscripting it. Previously this
        # check came after several uses, so a missing dict surfaced as a
        # TypeError instead of this explicit internal error.
        if param_dict is None:
            raise Exception("Internal error - param_dict is empty.")
        local_working_directory = param_dict["__local_working_directory__"]
        expression_inputs_path = os.path.join(local_working_directory, ExpressionTool.EXPRESSION_INPUTS_NAME)
        # Describe each output: its name, the expression fragment producing
        # it, and the path the result should be written to.
        outputs = []
        for out_name in out_data.keys():
            output_def = self.outputs[out_name]
            wrapped_data = param_dict.get(out_name)
            file_name = str(wrapped_data)
            outputs.append(dict(
                name=out_name,
                from_expression=output_def.from_expression,
                path=file_name,
            ))
        job = {}
        json_wrap(self.inputs, param_dict, self.profile, job, handle_files='OBJECT')
        expression_inputs = {
            'job': job,
            'script': self._expression,
            'outputs': outputs,
        }
        # (os.path.join with a single argument was a no-op; pass the directory
        # directly.)
        expressions.write_evalute_script(local_working_directory)
        with open(expression_inputs_path, "w") as f:
            json.dump(expression_inputs, f)

    def exec_after_process(self, app, inp_data, out_data, param_dict, job=None, **kwds):
        """For each data output, read the dataset reference the expression
        produced and copy the referenced input dataset over the output."""
        for key, val in self.outputs.items():
            if key not in out_data:
                # Skip filtered outputs
                continue
            if val.output_type == "data":
                with open(out_data[key].file_name) as f:
                    src = json.load(f)
                assert isinstance(src, dict), f"Expected dataset 'src' to be a dictionary - actual type is {type(src)}"
                dataset_id = src["id"]
                copy_object = None
                for input_dataset in inp_data.values():
                    if input_dataset and input_dataset.id == dataset_id:
                        copy_object = input_dataset
                        break
                if copy_object is None:
                    raise Exception("Failed to find dataset output.")
                out_data[key].copy_from(copy_object)

    def parse_environment_variables(self, tool_source):
        """ Setup environment variable for inputs file.
        """
        environment_variables_raw = super().parse_environment_variables(tool_source)
        expression_script_inputs = dict(
            name="GALAXY_EXPRESSION_INPUTS",
            template=ExpressionTool.EXPRESSION_INPUTS_NAME,
        )
        environment_variables_raw.append(expression_script_inputs)
        return environment_variables_raw
class DataSourceTool(OutputParameterJSONTool):
    """
    Alternate implementation of Tool for data_source tools -- those that
    allow the user to query and extract data from another web site.
    """
    tool_type = 'data_source'
    default_tool_action = DataSourceToolAction
    def _build_GALAXY_URL_parameter(self):
        # Hidden baseurl parameter telling the remote site where to POST
        # results back to (this Galaxy's tool runner for this tool id).
        return ToolParameter.build(self, XML(f'<param name="GALAXY_URL" type="baseurl" value="/tool_runner?tool_id={self.id}" />'))
    def parse_inputs(self, tool_source):
        """Parse inputs normally, then ensure a GALAXY_URL parameter exists."""
        super().parse_inputs(tool_source)
        # Open all data_source tools in _top.
        self.target = '_top'
        if 'GALAXY_URL' not in self.inputs:
            self.inputs['GALAXY_URL'] = self._build_GALAXY_URL_parameter()
            self.inputs_by_page[0]['GALAXY_URL'] = self.inputs['GALAXY_URL']
    def exec_before_job(self, app, inp_data, out_data, param_dict=None):
        """Apply remote-supplied metadata (name, info, dbkey, datatype) to each
        output dataset, then write the JSON job description file (same layout
        as OutputParameterJSONTool).
        """
        if param_dict is None:
            param_dict = {}
        # Global (non-per-output) metadata defaults posted by the remote site.
        dbkey = param_dict.get('dbkey')
        info = param_dict.get('info')
        data_type = param_dict.get('data_type')
        name = param_dict.get('name')
        json_params = {}
        json_params['param_dict'] = self._prepare_json_param_dict(param_dict)  # it would probably be better to store the original incoming parameters here, instead of the Galaxy modified ones?
        json_params['output_data'] = []
        json_params['job_config'] = dict(GALAXY_DATATYPES_CONF_FILE=param_dict.get('GALAXY_DATATYPES_CONF_FILE'), GALAXY_ROOT_DIR=param_dict.get('GALAXY_ROOT_DIR'), TOOL_PROVIDED_JOB_METADATA_FILE=self.provided_metadata_file)
        json_filename = None
        for out_name, data in out_data.items():
            # use wrapped dataset to access certain values
            wrapped_data = param_dict.get(out_name)
            # allow multiple files to be created
            # Per-output overrides arrive as 'GALAXY|<out_name>|<field>' params.
            cur_base_param_name = f'GALAXY|{out_name}|'
            cur_name = param_dict.get(f"{cur_base_param_name}name", name)
            # NOTE(review): 'dkey' does not follow the '<field name>' pattern of
            # the sibling lookups (name/info/data_type); 'dbkey' would be
            # expected. If no remote site actually posts '...|dkey' this always
            # falls back to the global dbkey -- confirm against the data-source
            # protocol before changing.
            cur_dbkey = param_dict.get(f"{cur_base_param_name}dkey", dbkey)
            cur_info = param_dict.get(f"{cur_base_param_name}info", info)
            cur_data_type = param_dict.get(f"{cur_base_param_name}data_type", data_type)
            if cur_name:
                data.name = cur_name
            if not data.info and cur_info:
                data.info = cur_info
            if cur_dbkey:
                data.dbkey = cur_dbkey
            if cur_data_type:
                data.extension = cur_data_type
            file_name = str(wrapped_data)
            extra_files_path = str(wrapped_data.files_path)
            data_dict = dict(out_data_name=out_name,
                             ext=data.ext,
                             dataset_id=data.dataset.id,
                             hda_id=data.id,
                             file_name=file_name,
                             extra_files_path=extra_files_path)
            json_params['output_data'].append(data_dict)
            if json_filename is None:
                # The JSON payload is written into the first output's file.
                json_filename = file_name
        with open(json_filename, 'w') as out:
            out.write(json.dumps(json_params))
class AsyncDataSourceTool(DataSourceTool):
    """Data source tool variant whose results come back asynchronously."""
    tool_type = 'data_source_async'

    def _build_GALAXY_URL_parameter(self):
        # Async data sources call back into /async/<tool id> rather than the
        # standard tool runner endpoint.
        xml_text = f'<param name="GALAXY_URL" type="baseurl" value="/async/{self.id}" />'
        return ToolParameter.build(self, XML(xml_text))
class DataDestinationTool(Tool):
    """Tool subclass for data-destination tools; only overrides tool_type."""
    tool_type = 'data_destination'
class SetMetadataTool(Tool):
    """
    Tool implementation for special tool that sets metadata on an existing
    dataset.
    """
    tool_type = 'set_metadata'
    # This tool *is* the metadata setter; it never needs metadata set on it.
    requires_setting_metadata = False
    def regenerate_imported_metadata_if_needed(self, hda, history, job):
        # Only re-run metadata generation when the HDA declares metadata file
        # types that need to be materialized.
        if len(hda.metadata_file_types) > 0:
            self.tool_action.execute_via_app(
                self, self.app, job.session_id,
                history.id, job.user, incoming={'input1': hda}, overwrite=False
            )
    def exec_after_process(self, app, inp_data, out_data, param_dict, job=None, **kwds):
        """Load externally-computed metadata results onto each input dataset,
        restore the dataset state, and refresh its peek.
        """
        working_directory = app.object_store.get_filename(
            job, base_dir='job_work', dir_only=True, obj_dir=True
        )
        for name, dataset in inp_data.items():
            external_metadata = get_metadata_compute_strategy(app.config, job.id, tool_id=self.id)
            sa_session = app.model.context
            metadata_set_successfully = external_metadata.external_metadata_set_successfully(dataset, name, sa_session, working_directory=working_directory)
            if metadata_set_successfully:
                try:
                    # external_metadata_set_successfully is only an approximation (the metadata json file exists),
                    # things can still go wrong, but we don't want to fail here since it can lead to a resubmission loop
                    external_metadata.load_metadata(dataset, name, sa_session, working_directory=working_directory)
                except Exception:
                    metadata_set_successfully = False
                    log.exception("Exception occured while loading metadata results")
            if not metadata_set_successfully:
                # Mark the dataset's metadata as failed and stop; note this
                # returns after the *first* failing dataset.
                dataset._state = model.Dataset.states.FAILED_METADATA
                self.sa_session.add(dataset)
                self.sa_session.flush()
                return
            # If setting external metadata has failed, how can we inform the
            # user? For now, we'll leave the default metadata and set the state
            # back to its original.
            dataset.datatype.after_setting_metadata(dataset)
            # NOTE(review): comparing job.tool_id to the version-like literal
            # '1.0.0' looks like a legacy-tool special case -- confirm which
            # tool id this is meant to match.
            if job and job.tool_id == '1.0.0':
                dataset.state = param_dict.get('__ORIGINAL_DATASET_STATE__')
            else:
                # Revert dataset.state to fall back to dataset.dataset.state
                dataset._state = None
            # Need to reset the peek, which may rely on metadata
            # TODO: move this into metadata setting, setting the peek requires dataset access,
            # and large chunks of the dataset may be read here.
            try:
                dataset.set_peek()
            except Exception:
                log.exception("Exception occured while setting dataset peek")
            self.sa_session.add(dataset)
            self.sa_session.flush()
    def job_failed(self, job_wrapper, message, exception=False):
        """On job failure, run exec_after_process over the job's input
        datasets so their failed-metadata state gets recorded."""
        job = job_wrapper.sa_session.query(model.Job).get(job_wrapper.job_id)
        if job:
            inp_data = {}
            for dataset_assoc in job.input_datasets:
                inp_data[dataset_assoc.name] = dataset_assoc.dataset
            return self.exec_after_process(job_wrapper.app, inp_data, {}, job_wrapper.get_param_dict(), job=job)
class ExportHistoryTool(Tool):
    """Internal tool used to export a history; only overrides tool_type."""
    tool_type = 'export_history'
class ImportHistoryTool(Tool):
    """Internal tool used to import a history archive."""
    tool_type = 'import_history'
    def exec_after_process(self, app, inp_data, out_data, param_dict, job, final_job_state=None, **kwds):
        """Run normal post-processing, then clean up the archive wrapper --
        but only when the import job finished OK."""
        super().exec_after_process(app, inp_data, out_data, param_dict, job=job, **kwds)
        if final_job_state != DETECTED_JOB_STATE.OK:
            return
        JobImportHistoryArchiveWrapper(self.app, job.id).cleanup_after_job()
class InteractiveTool(Tool):
    """Tool that exposes live entry points while its job runs; the entry
    points are torn down when the job finishes or fails."""
    tool_type = 'interactive'
    produces_entry_points = True
    def __init__(self, config_file, tool_source, app, **kwd):
        # Refuse to load unless the feature is enabled in the app config.
        # NOTE(review): assert is stripped under `python -O`.
        assert app.config.interactivetools_enable, ValueError('Trying to load an InteractiveTool, but InteractiveTools are not enabled.')
        super().__init__(config_file, tool_source, app, **kwd)
    def __remove_interactivetool_by_job(self, job):
        """Remove all entry points registered for `job` (no-op with warning
        when no job could be determined)."""
        if job:
            eps = job.interactivetool_entry_points
            log.debug('__remove_interactivetool_by_job: %s', eps)
            self.app.interactivetool_manager.remove_entry_points(eps)
        else:
            log.warning("Could not determine job to stop InteractiveTool: %s", job)
    def exec_after_process(self, app, inp_data, out_data, param_dict, job=None, **kwds):
        # run original exec_after_process
        super().exec_after_process(app, inp_data, out_data, param_dict, job=job, **kwds)
        self.__remove_interactivetool_by_job(job)
    def job_failed(self, job_wrapper, message, exception=False):
        # Also tear down entry points when the job failed.
        super().job_failed(job_wrapper, message, exception=exception)
        job = job_wrapper.sa_session.query(model.Job).get(job_wrapper.job_id)
        self.__remove_interactivetool_by_job(job)
class DataManagerTool(OutputParameterJSONTool):
    """Admin-only tool that populates Galaxy data tables; results are
    processed by the associated data manager after the job completes."""
    tool_type = 'manage_data'
    default_tool_action = DataManagerToolAction
    def __init__(self, config_file, root, app, guid=None, data_manager_id=None, **kwds):
        # data_manager_id defaults to the tool id when not supplied.
        self.data_manager_id = data_manager_id
        super().__init__(config_file, root, app, guid=guid, **kwds)
        if self.data_manager_id is None:
            self.data_manager_id = self.id
    def exec_after_process(self, app, inp_data, out_data, param_dict, job=None, final_job_state=None, **kwds):
        """After a successful job, hand the outputs to the data manager for
        processing. NOTE(review): access checks use assert, which is stripped
        under `python -O`."""
        assert self.allow_user_access(job.user), "You must be an admin to access this tool."
        if final_job_state != DETECTED_JOB_STATE.OK:
            return
        # run original exec_after_process
        super().exec_after_process(app, inp_data, out_data, param_dict, job=job, **kwds)
        # process results of tool
        data_manager_id = job.data_manager_association.data_manager_id
        data_manager = self.app.data_managers.get_manager(data_manager_id, None)
        assert data_manager is not None, f"Invalid data manager ({data_manager_id}) requested. It may have been removed before the job completed."
        data_manager.process_result(out_data)
    def get_default_history_by_trans(self, trans, create=False):
        """Return the user's most recent non-deleted data manager history,
        optionally creating one when none usable exists (or None)."""
        def _create_data_manager_history(user):
            history = trans.app.model.History(name='Data Manager History (automatically created)', user=user)
            data_manager_association = trans.app.model.DataManagerHistoryAssociation(user=user, history=history)
            trans.sa_session.add_all((history, data_manager_association))
            trans.sa_session.flush()
            return history
        user = trans.user
        assert user, 'You must be logged in to use this tool.'
        assert self.allow_user_access(user), "You must be an admin to access this tool."
        dm_history_associations = user.data_manager_histories
        if not dm_history_associations:
            # create
            if create:
                history = _create_data_manager_history(user)
            else:
                history = None
        else:
            # Walk associations newest-first, stopping at the first history
            # that has not been deleted.
            for dm_history_association in reversed(dm_history_associations):
                history = dm_history_association.history
                if not history.deleted:
                    break
            if history.deleted:
                # All existing data manager histories were deleted.
                if create:
                    history = _create_data_manager_history(user)
                else:
                    history = None
        return history
    def allow_user_access(self, user, attempting_access=True) -> bool:
        """Check user access to this tool.
        :param user: model object representing user.
        :type user: galaxy.model.User
        :param attempting_access: is the user attempting to do something with the
                               the tool (set false for incidental checks like toolbox
                               listing)
        :type attempting_access: bool
        :returns: Whether the user is allowed to access the tool.
                  Data Manager tools are only accessible to admins.
        """
        if super().allow_user_access(user) and self.app.config.is_admin_user(user):
            return True
        # If this is just an incidental check - do not log the scary message
        # about users attempting to do something problematic.
        if attempting_access:
            if user:
                user = user.id
            log.debug("User (%s) attempted to access a data manager tool (%s), but is not an admin.", user, self.id)
        return False
class DatabaseOperationTool(Tool):
    """Base class for tools that produce outputs purely by copying and
    rearranging existing history items (no external job execution).
    """
    default_tool_action = ModelOperationToolAction
    # Subclasses may relax this to operate on errored/terminal datasets.
    require_dataset_ok = True
    tool_type_local = True

    @property
    def valid_input_states(self):
        """Dataset states acceptable as inputs to this tool."""
        if self.require_dataset_ok:
            return (model.Dataset.states.OK,)
        else:
            return model.Dataset.terminal_states

    @property
    def allow_errored_inputs(self):
        return not self.require_dataset_ok

    def check_inputs_ready(self, input_datasets, input_dataset_collections):
        """Raise if any input dataset or collection is not ready to consume.

        :raises ToolInputsNotReadyException: an input is still pending or an
            input collection is not yet populated.
        :raises ValueError: ``require_dataset_ok`` is set and a terminal input
            is not in the OK state.
        """
        def check_dataset_state(state, dataset=None):
            # `dataset` is passed for direct dataset inputs so the error can
            # reference it; collection element checks only have the state.
            if state in model.Dataset.non_ready_states:
                raise ToolInputsNotReadyException("An input dataset is pending.")
            if self.require_dataset_ok and state != model.Dataset.states.OK:
                # Fix: the message previously referenced the outer loop
                # variable `input_dataset`, which was stale (or unbound,
                # raising NameError) when checking collection element states.
                if dataset is not None:
                    raise ValueError(f"Tool requires inputs to be in valid state, but dataset {dataset} is in state '{dataset.state}'")
                raise ValueError(f"Tool requires inputs to be in valid state, but an input dataset is in state '{state}'")
        for input_dataset in input_datasets.values():
            check_dataset_state(input_dataset.state, dataset=input_dataset)
        for input_dataset_collection_pairs in input_dataset_collections.values():
            for input_dataset_collection, _ in input_dataset_collection_pairs:
                if not input_dataset_collection.collection.populated_optimized:
                    raise ToolInputsNotReadyException("An input collection is not populated.")
                states, _ = input_dataset_collection.collection.dataset_states_and_extensions_summary
                for state in states:
                    check_dataset_state(state)

    def _add_datasets_to_history(self, history, elements, datasets_visible=False):
        """Stage dataset elements for addition to `history`, setting their
        visibility per `datasets_visible`."""
        for element_object in elements:
            if getattr(element_object, "history_content_type", None) == "dataset":
                element_object.visible = datasets_visible
                history.stage_addition(element_object)

    def produce_outputs(self, trans, out_data, output_collections, incoming, history, **kwds):
        """Subclasses implement the actual model operation here."""
        return self._outputs_dict()

    def _outputs_dict(self):
        return {}
class UnzipCollectionTool(DatabaseOperationTool):
    """Split a paired collection into its forward and reverse datasets."""
    tool_type = 'unzip_collection'

    def produce_outputs(self, trans, out_data, output_collections, incoming, history, **kwds):
        input_value = incoming["input"]
        if hasattr(input_value, "element_type"):
            # It is a DCE
            paired_collection = input_value.element_object
        else:
            # It is an HDCA
            paired_collection = input_value.collection
        assert paired_collection.collection_type == "paired"
        forward_src, reverse_src = paired_collection.dataset_instances
        forward = forward_src.copy(copy_tags=forward_src.tags)
        reverse = reverse_src.copy(copy_tags=reverse_src.tags)
        self._add_datasets_to_history(history, [forward, reverse])
        out_data["forward"] = forward
        out_data["reverse"] = reverse
class ZipCollectionTool(DatabaseOperationTool):
    """Combine two datasets into a new paired collection."""
    tool_type = 'zip_collection'

    def produce_outputs(self, trans, out_data, output_collections, incoming, history, **kwds):
        forward_src = incoming["input_forward"]
        reverse_src = incoming["input_reverse"]
        forward = forward_src.copy(copy_tags=forward_src.tags)
        reverse = reverse_src.copy(copy_tags=reverse_src.tags)
        new_elements = {"forward": forward, "reverse": reverse}
        self._add_datasets_to_history(history, [forward, reverse])
        output_collections.create_collection(
            next(iter(self.outputs.values())), "output", elements=new_elements, propagate_hda_tags=False
        )
class BuildListCollectionTool(DatabaseOperationTool):
    """Build a list collection from individually selected datasets."""
    tool_type = 'build_list'

    def produce_outputs(self, trans, out_data, output_collections, incoming, history, **kwds):
        new_elements = {}
        # Element identifiers are the 0-based position within the repeat.
        for index, repeat_instance in enumerate(incoming["datasets"]):
            dataset_instance = repeat_instance["input"]
            if dataset_instance:
                new_elements[str(index)] = dataset_instance.copy(copy_tags=dataset_instance.tags)
        self._add_datasets_to_history(history, new_elements.values())
        output_collections.create_collection(
            next(iter(self.outputs.values())), "output", elements=new_elements, propagate_hda_tags=False
        )
class ExtractDatasetCollectionTool(DatabaseOperationTool):
    """Copy a single element out of a list or paired collection."""
    tool_type = 'extract_dataset'

    def produce_outputs(self, trans, out_data, output_collections, incoming, history, tags=None, **kwds):
        input_value = incoming["input"]
        # Accept either a DCE (mapped-over subcollection) or an HDCA.
        if hasattr(input_value, "element_type"):
            collection = input_value.element_object
        else:
            collection = input_value.collection
        assert collection.collection_type in ["list", "paired"]
        how = incoming["which"]["which_dataset"]
        if how == "first":
            selected_element = collection.first_dataset_element
        elif how == "by_identifier":
            selected_element = collection[incoming["which"]["identifier"]]
        elif how == "by_index":
            selected_element = collection[int(incoming["which"]["index"])]
        else:
            raise Exception("Invalid tool parameters.")
        source_dataset = selected_element.element_object
        extracted_copy = source_dataset.copy(copy_tags=source_dataset.tags, new_name=selected_element.element_identifier)
        self._add_datasets_to_history(history, [extracted_copy], datasets_visible=True)
        out_data["output"] = extracted_copy
class MergeCollectionTool(DatabaseOperationTool):
    """Merge several list collections into one, resolving duplicate element
    identifiers per the selected duplicate-handling policy (keep first, fail,
    or add a suffix)."""
    tool_type = 'merge_collection'
    def produce_outputs(self, trans, out_data, output_collections, incoming, history, **kwds):
        input_lists = []
        for incoming_repeat in incoming["inputs"]:
            input_lists.append(incoming_repeat["input"])
        advanced = incoming.get("advanced", None)
        # Default policy: keep the first occurrence of each identifier.
        dupl_actions = "keep_first"
        suffix_pattern = None
        if advanced is not None:
            dupl_actions = advanced["conflict"]['duplicate_options']
            if dupl_actions in ['suffix_conflict', 'suffix_every', 'suffix_conflict_rest']:
                # '#' in the pattern is later replaced by the 1-based input number.
                suffix_pattern = advanced['conflict']['suffix_pattern']
        new_element_structure = {}
        # Which inputs does the identifier appear in.
        identifiers_map = {}
        for input_num, input_list in enumerate(input_lists):
            for dce in input_list.collection.elements:
                element_identifier = dce.element_identifier
                if element_identifier not in identifiers_map:
                    identifiers_map[element_identifier] = []
                elif dupl_actions == "fail":
                    raise Exception(f"Duplicate collection element identifiers found for [{element_identifier}]")
                identifiers_map[element_identifier].append(input_num)
        # Second pass: decide effective identifiers and collect valid elements.
        for copy, input_list in enumerate(input_lists):
            for dce in input_list.collection.elements:
                element = dce.element_object
                valid = False
                # dealing with a single element
                if hasattr(element, "is_ok"):
                    if element.is_ok:
                        valid = True
                elif hasattr(element, "dataset_instances"):
                    # we are probably a list:paired dataset, both need to be in non error state
                    forward_o, reverse_o = element.dataset_instances
                    if forward_o.is_ok and reverse_o.is_ok:
                        valid = True
                if valid:
                    element_identifier = dce.element_identifier
                    identifier_seen = element_identifier in new_element_structure
                    appearances = identifiers_map[element_identifier]
                    add_suffix = False
                    if dupl_actions == "suffix_every":
                        add_suffix = True
                    elif dupl_actions == "suffix_conflict" and len(appearances) > 1:
                        add_suffix = True
                    elif dupl_actions == "suffix_conflict_rest" and len(appearances) > 1 and appearances[0] != copy:
                        # Suffix all but the first input the identifier appears in.
                        add_suffix = True
                    if dupl_actions == "keep_first" and identifier_seen:
                        continue
                    if add_suffix:
                        suffix = suffix_pattern.replace("#", str(copy + 1))
                        effective_identifer = f"{element_identifier}{suffix}"
                    else:
                        effective_identifer = element_identifier
                    # Insertion order here determines output element order.
                    new_element_structure[effective_identifer] = element
        # Don't copy until we know everything is fine and we have the structure of the list ready to go.
        new_elements = {}
        for key, value in new_element_structure.items():
            if getattr(value, "history_content_type", None) == "dataset":
                copied_value = value.copy(copy_tags=value.tags, flush=False)
            else:
                copied_value = value.copy()
            new_elements[key] = copied_value
        self._add_datasets_to_history(history, new_elements.values())
        output_collections.create_collection(
            next(iter(self.outputs.values())), "output", elements=new_elements, propagate_hda_tags=False
        )
class FilterDatasetsTool(DatabaseOperationTool):
    """Base for tools that copy only the collection elements passing the
    subclass-defined ``element_is_valid`` predicate into a new collection."""

    def _get_new_elements(self, history, elements_to_copy):
        """Copy each kept element; returns identifier -> copied object."""
        new_elements = {}
        for dce in elements_to_copy:
            source_object = dce.element_object
            if getattr(source_object, "history_content_type", None) == "dataset":
                copied = source_object.copy(copy_tags=source_object.tags, flush=False)
            else:
                copied = source_object.copy()
            new_elements[dce.element_identifier] = copied
        return new_elements

    def produce_outputs(self, trans, out_data, output_collections, incoming, history, **kwds):
        input_value = incoming["input"]
        if hasattr(input_value, 'element_object'):
            # A list
            source_collection = input_value.element_object
        else:
            # A list of pairs
            source_collection = input_value.collection
        elements = source_collection.elements
        collection_type = source_collection.collection_type
        # We only process list or list of pair collections. Higher order
        # collections will be mapped over.
        assert collection_type in ("list", "list:paired")
        elements_to_copy = []
        for element in elements:
            if collection_type == 'list':
                if self.element_is_valid(element):
                    elements_to_copy.append(element)
            else:
                # A pair survives only when every child passes the filter.
                if all(self.element_is_valid(child) for child in element.child_collection.elements):
                    elements_to_copy.append(element)
        new_elements = self._get_new_elements(history=history, elements_to_copy=elements_to_copy)
        self._add_datasets_to_history(history, new_elements.values())
        output_collections.create_collection(
            next(iter(self.outputs.values())),
            "output",
            elements=new_elements,
            propagate_hda_tags=False
        )
class FilterFailedDatasetsTool(FilterDatasetsTool):
    """Keep only collection elements whose datasets completed successfully."""
    tool_type = 'filter_failed_datasets_collection'
    require_dataset_ok = False

    def element_is_valid(self, element):
        # Valid when the underlying dataset is in the ok state.
        dataset_instance = element.element_object
        return dataset_instance.is_ok
class FilterEmptyDatasetsTool(FilterDatasetsTool):
    """Keep only collection elements whose datasets contain data."""
    tool_type = 'filter_empty_datasets_collection'
    require_dataset_ok = False

    def element_is_valid(self, element):
        # Valid when the underlying dataset reports that it has data.
        dataset_instance = element.element_object
        return dataset_instance.has_data()
class FlattenTool(DatabaseOperationTool):
    """Flatten a nested collection into a flat list, joining nested element
    identifiers with the configured separator."""
    tool_type = 'flatten_collection'

    def produce_outputs(self, trans, out_data, output_collections, incoming, history, **kwds):
        hdca = incoming["input"]
        join_identifier = incoming["join_identifier"]
        new_elements = {}
        copied_datasets = []

        def walk(collection, prefix=""):
            # Depth-first traversal: leaves become copied datasets keyed by
            # the joined path of element identifiers.
            for dce in collection.elements:
                child_object = dce.element_object
                label = dce.element_identifier
                full_label = f"{prefix}{join_identifier}{label}" if prefix else label
                if dce.is_collection:
                    walk(child_object, prefix=full_label)
                else:
                    copied = child_object.copy(copy_tags=child_object.tags, flush=False)
                    new_elements[full_label] = copied
                    copied_datasets.append(copied)

        walk(hdca.collection)
        self._add_datasets_to_history(history, copied_datasets)
        output_collections.create_collection(
            next(iter(self.outputs.values())), "output", elements=new_elements, propagate_hda_tags=False
        )
class SortTool(DatabaseOperationTool):
    """Sort the elements of a list collection.

    Supported modes (``sort_type``):
      * ``alpha``   - lexicographic sort on element identifiers
      * ``numeric`` - numeric sort on the digits within element identifiers
      * ``file``    - order given by a file listing one identifier per line
    """
    tool_type = 'sort_collection'

    def produce_outputs(self, trans, out_data, output_collections, incoming, history, **kwds):
        hdca = incoming["input"]
        sorttype = incoming["sort_type"]["sort_type"]
        new_elements = {}
        elements = hdca.collection.elements
        # Fix: initialize so an empty input collection yields an empty output
        # instead of raising NameError at the copy loop below.
        sorted_elements = []
        presort_elements = []
        if sorttype == 'alpha':
            presort_elements = [(dce.element_identifier, dce) for dce in elements]
        elif sorttype == 'numeric':
            # Sort key is the integer formed by the digits in the identifier.
            presort_elements = [(int(re.sub('[^0-9]', '', dce.element_identifier)), dce) for dce in elements]
        if presort_elements:
            sorted_elements = [x[1] for x in sorted(presort_elements, key=lambda x: x[0])]
        if sorttype == 'file':
            hda = incoming["sort_type"]["sort_file"]
            data_lines = hda.metadata.get('data_lines', 0)
            if data_lines == len(elements):
                old_elements_dict = {}
                for element in elements:
                    old_elements_dict[element.element_identifier] = element
                try:
                    with open(hda.file_name) as fh:
                        sorted_elements = [old_elements_dict[line.strip()] for line in fh]
                except KeyError:
                    hdca_history_name = f"{hdca.hid}: {hdca.name}"
                    message = f"List of element identifiers does not match element identifiers in collection '{hdca_history_name}'"
                    raise Exception(message)
            else:
                message = "Number of lines must match number of list elements (%i), but file has %i lines"
                # Fix: the arguments were swapped -- the element count belongs
                # in the first placeholder and the file's line count in the
                # second.
                raise Exception(message % (len(elements), data_lines))
        # Copy the elements in their sorted order; dict insertion order
        # defines the output collection order.
        for dce in sorted_elements:
            dce_object = dce.element_object
            if getattr(dce_object, "history_content_type", None) == "dataset":
                copied_dataset = dce_object.copy(copy_tags=dce_object.tags, flush=False)
            else:
                copied_dataset = dce_object.copy(flush=False)
            new_elements[dce.element_identifier] = copied_dataset
        self._add_datasets_to_history(history, new_elements.values())
        output_collections.create_collection(
            next(iter(self.outputs.values())), "output", elements=new_elements, propagate_hda_tags=False
        )
class RelabelFromFileTool(DatabaseOperationTool):
    """Copy a collection, renaming its elements using labels read from a dataset.

    Two modes (``how_select``): ``tabular`` maps old identifier -> new identifier
    via a two-column file; otherwise line *i* of the file becomes the label of
    element *i*.  With ``strict`` enabled the label file must cover every
    element exactly.
    """
    tool_type = 'relabel_from_file'

    def produce_outputs(self, trans, out_data, output_collections, incoming, history, **kwds):
        hdca = incoming["input"]
        how_type = incoming["how"]["how_select"]
        new_labels_dataset_assoc = incoming["how"]["labels"]
        strict = string_as_bool(incoming["how"]["strict"])
        new_elements = {}

        def add_copied_value_to_new_elements(new_label, dce_object):
            # Register a copy of dce_object under new_label; labels must be unique.
            new_label = new_label.strip()
            if new_label in new_elements:
                raise Exception(f"New identifier [{new_label}] appears twice in resulting collection, these values must be unique.")
            if getattr(dce_object, "history_content_type", None) == "dataset":
                copied_value = dce_object.copy(copy_tags=dce_object.tags, flush=False)
            else:
                copied_value = dce_object.copy()
            new_elements[new_label] = copied_value

        new_labels_path = new_labels_dataset_assoc.file_name
        with open(new_labels_path) as fh:
            # Bounded read: stop after ~1GB of label data.
            new_labels = fh.readlines(1024 * 1000000)
        if strict and len(hdca.collection.elements) != len(new_labels):
            raise Exception("Relabel mapping file contains incorrect number of identifiers")
        if how_type == "tabular":
            # We have a tabular file, where the first column is an existing element identifier,
            # and the second column is the new element identifier.
            source_new_label = (line.strip().split('\t') for line in new_labels)
            new_labels_dict = {source: new_label for source, new_label in source_new_label}
            for dce in hdca.collection.elements:
                dce_object = dce.element_object
                element_identifier = dce.element_identifier
                # Non-strict mode keeps the old identifier when no mapping exists.
                default = None if strict else element_identifier
                new_label = new_labels_dict.get(element_identifier, default)
                if not new_label:
                    raise Exception(f"Failed to find new label for identifier [{element_identifier}]")
                add_copied_value_to_new_elements(new_label, dce_object)
        else:
            # If new_labels_dataset_assoc is not a two-column tabular dataset we label with the current line of the dataset
            for i, dce in enumerate(hdca.collection.elements):
                dce_object = dce.element_object
                add_copied_value_to_new_elements(new_labels[i], dce_object)
        # Validate all resulting identifiers before creating the collection.
        for key in new_elements.keys():
            if not re.match(r"^[\w\- \.,]+$", key):
                # Fixed typo in the error message ("colleciton" -> "collection").
                raise Exception(f"Invalid new collection identifier [{key}]")
        self._add_datasets_to_history(history, new_elements.values())
        output_collections.create_collection(
            next(iter(self.outputs.values())), "output", elements=new_elements, propagate_hda_tags=False
        )
class ApplyRulesTool(DatabaseOperationTool):
    # Re-organizes an input collection by applying a user-defined RuleSet,
    # copying the underlying datasets into the target history.
    tool_type = 'apply_rules'

    def produce_outputs(self, trans, out_data, output_collections, incoming, history, tag_handler, **kwds):
        """Apply ``incoming["rules"]`` to the input HDCA and create the output collection.

        Every dataset touched by the rules is copied (never mutated in place);
        the copies are batched and added to ``history`` in one call before the
        output collection is created.
        """
        hdca = incoming["input"]
        rule_set = RuleSet(incoming["rules"])
        copied_datasets = []

        def copy_dataset(dataset, tags):
            # Callback handed to apply_rules: copy without flushing, apply the
            # per-element tags (if any), and record the copy for later batching.
            copied_dataset = dataset.copy(copy_tags=dataset.tags, flush=False)
            if tags is not None:
                tag_handler.set_tags_from_list(trans.get_user(), copied_dataset, tags, flush=False)
            copied_dataset.history_id = history.id
            copied_datasets.append(copied_dataset)
            return copied_dataset

        # The collection manager drives the rule application and returns the
        # new element mapping; the output's collection_type comes from the rules.
        new_elements = self.app.dataset_collection_manager.apply_rules(
            hdca, rule_set, copy_dataset
        )
        self._add_datasets_to_history(history, copied_datasets)
        output_collections.create_collection(
            next(iter(self.outputs.values())), "output", collection_type=rule_set.collection_type, elements=new_elements, propagate_hda_tags=False
        )
class TagFromFileTool(DatabaseOperationTool):
    """Copy an input collection, applying per-element tags read from a tabular file.

    The tag file maps element identifiers (first column) to tag lists
    (remaining columns).  ``how`` selects whether tags are set outright,
    added to, or removed from the copied elements.
    """
    tool_type = 'tag_from_file'

    def produce_outputs(self, trans, out_data, output_collections, incoming, history, tag_handler, **kwds):
        hdca = incoming["input"]
        how = incoming['how']
        new_tags_dataset_assoc = incoming["tags"]
        new_elements = {}
        new_datasets = []

        def add_copied_value_to_new_elements(new_tags_dict, dce):
            # Copy one collection element (dataset or nested collection) and
            # apply the tags looked up by its identifier.
            if getattr(dce.element_object, "history_content_type", None) == "dataset":
                copied_value = dce.element_object.copy(copy_tags=dce.element_object.tags, flush=False)
                # copy should never be visible, since part of a collection
                # BUGFIX: this previously assigned a misspelled `visble`
                # attribute, leaving the copy visible.
                copied_value.visible = False
                new_datasets.append(copied_value)
                new_tags = new_tags_dict.get(dce.element_identifier)
                if new_tags:
                    if how in ('add', 'remove') and dce.element_object.tags:
                        # We need to get the original tags and update them with the new tags
                        old_tags = {tag for tag in tag_handler.get_tags_str(dce.element_object.tags).split(',') if tag}
                        if how == 'add':
                            old_tags.update(set(new_tags))
                        elif how == 'remove':
                            old_tags = old_tags - set(new_tags)
                        new_tags = old_tags
                    tag_handler.add_tags_from_list(user=history.user, item=copied_value, new_tags_list=new_tags, flush=False)
            else:
                # We have a collection, and we copy the elements so that we don't manipulate the original tags
                copied_value = dce.element_object.copy(element_destination=history)
                for new_element, old_element in zip(copied_value.dataset_elements, dce.element_object.dataset_elements):
                    # TODO: This should be eliminated, but collections created by the collection builder
                    # don't set `visible` to `False` if you don't hide the original elements.
                    new_element.element_object.visible = False
                    new_tags = new_tags_dict.get(new_element.element_identifier)
                    if how in ('add', 'remove'):
                        old_tags = {tag for tag in tag_handler.get_tags_str(old_element.element_object.tags).split(',') if tag}
                        if new_tags:
                            if how == 'add':
                                old_tags.update(set(new_tags))
                            elif how == 'remove':
                                old_tags = old_tags - set(new_tags)
                        new_tags = old_tags
                    tag_handler.add_tags_from_list(user=history.user, item=new_element.element_object, new_tags_list=new_tags, flush=False)
            new_elements[dce.element_identifier] = copied_value

        new_tags_path = new_tags_dataset_assoc.file_name
        with open(new_tags_path) as fh:
            # Bounded read: stop after ~1GB of tag data.
            new_tags = fh.readlines(1024 * 1000000)
        # We have a tabular file, where the first column is an existing element identifier,
        # and the remaining columns represent new tags.
        source_new_tags = (line.strip().split('\t') for line in new_tags)
        new_tags_dict = {item[0]: item[1:] for item in source_new_tags}
        for dce in hdca.collection.elements:
            add_copied_value_to_new_elements(new_tags_dict, dce)
        self._add_datasets_to_history(history, new_datasets)
        output_collections.create_collection(
            next(iter(self.outputs.values())), "output", elements=new_elements, propagate_hda_tags=False
        )
class FilterFromFileTool(DatabaseOperationTool):
    """Split a collection into kept/discarded outputs using an identifier file.

    ``how_filter`` decides the polarity: with ``remove_if_absent`` an element
    is kept only if its identifier appears in the file; otherwise it is kept
    only if its identifier does NOT appear in the file.
    """
    tool_type = 'filter_from_file'

    def produce_outputs(self, trans, out_data, output_collections, incoming, history, **kwds):
        hdca = incoming["input"]
        how_filter = incoming["how"]["how_filter"]
        filter_dataset_assoc = incoming["how"]["filter_source"]

        # One identifier per line; bounded read (~1GB max).
        with open(filter_dataset_assoc.file_name) as fh:
            listed_identifiers = [line.strip() for line in fh.readlines(1024 * 1000000)]

        filtered_elements = {}
        discarded_elements = {}
        for dce in hdca.collection.elements:
            element_object = dce.element_object
            identifier = dce.element_identifier
            is_listed = identifier in listed_identifiers
            keep = is_listed if how_filter == "remove_if_absent" else not is_listed
            if getattr(element_object, "history_content_type", None) == "dataset":
                duplicate = element_object.copy(copy_tags=element_object.tags, flush=False)
            else:
                duplicate = element_object.copy()
            destination = filtered_elements if keep else discarded_elements
            destination[identifier] = duplicate

        self._add_datasets_to_history(history, filtered_elements.values())
        output_collections.create_collection(
            self.outputs["output_filtered"], "output_filtered", elements=filtered_elements, propagate_hda_tags=False
        )
        self._add_datasets_to_history(history, discarded_elements.values())
        output_collections.create_collection(
            self.outputs["output_discarded"], "output_discarded", elements=discarded_elements, propagate_hda_tags=False
        )
# Populate tool_type to ToolClass mappings
TOOL_CLASSES: List[Type[Tool]] = [
    Tool,
    SetMetadataTool,
    OutputParameterJSONTool,
    ExpressionTool,
    InteractiveTool,
    DataManagerTool,
    DataSourceTool,
    AsyncDataSourceTool,
    UnzipCollectionTool,
    ZipCollectionTool,
    MergeCollectionTool,
    RelabelFromFileTool,
    FilterFromFileTool,
    BuildListCollectionTool,
    ExtractDatasetCollectionTool,
    DataDestinationTool
]
# Registry used to look up the implementing class for a given tool_type string.
tool_types = {tool_class.tool_type: tool_class for tool_class in TOOL_CLASSES}
# ---- Utility classes to be factored out -----------------------------------
class TracksterConfig:
    """Encapsulates a Trackster visualization configuration: a list of actions."""

    def __init__(self, actions):
        # Parsed action objects, kept in document order.
        self.actions = actions

    @staticmethod
    def parse(root):
        """Build a TracksterConfig from the <action> children of *root*."""
        parsed_actions = [SetParamAction.parse(elt) for elt in root.findall("action")]
        return TracksterConfig(parsed_actions)
class SetParamAction:
    """A 'set parameter' action tying a parameter name to a tool output name."""

    def __init__(self, name, output_name):
        # Parameter to set, and the output whose value feeds it.
        self.name = name
        self.output_name = output_name

    @staticmethod
    def parse(elt):
        """Build a SetParamAction from an XML element's attributes."""
        name = elt.get("name")
        output_name = elt.get("output_name")
        return SetParamAction(name, output_name)
class BadValue:
    """Wrapper holding a raw value; used to mark it for special handling by callers.

    NOTE(review): exact semantics ("bad" per what validation?) are defined at
    use sites outside this chunk — confirm there.
    """
    def __init__(self, value):
        # Preserve the original value for later inspection.
        self.value = value
class InterruptedUpload(Exception):
    """Exception type signalling an interrupted upload (raised by callers outside this chunk)."""
    pass
| 45.468586 | 225 | 0.630357 |
589dcfdaa03f9a2ad818ddc51d71f34225785942 | 1,844 | py | Python | cwgithub/cwgithub/spiders/github.py | trujunzhang/djzhang-targets | c2e327acde9d51f0455e7243f17d93d74b579501 | [
"MIT"
] | 2 | 2018-12-03T16:30:55.000Z | 2019-04-03T13:29:20.000Z | cwgithub/cwgithub/spiders/github.py | trujunzhang/djzhang-targets | c2e327acde9d51f0455e7243f17d93d74b579501 | [
"MIT"
] | null | null | null | cwgithub/cwgithub/spiders/github.py | trujunzhang/djzhang-targets | c2e327acde9d51f0455e7243f17d93d74b579501 | [
"MIT"
] | 1 | 2019-04-03T13:29:25.000Z | 2019-04-03T13:29:25.000Z | # -*- coding: utf-8 -*-
import scrapy
from selenium import webdriver
import time
from cwgithub.items import GithubItem
class GithubSpider(scrapy.Spider):
    """Scrape the logged-in user's starred repositories from github.com.

    Drives a real Firefox browser via Selenium (the pages are
    JavaScript-rendered), then yields one GithubItem per starred repo.

    SECURITY: credentials are hard-coded in parse() below and committed to
    source control — move them to settings/environment variables before any
    real use.
    """
    name = "github"
    allowed_domains = ["github.com"]
    start_urls = [
        'https://github.com/login',
    ]

    def __init__(self):
        # One shared browser instance for the whole crawl.
        self.driver = webdriver.Firefox()

    def spider_closed(self, spider):
        # Presumably wired to scrapy's spider_closed signal elsewhere — the
        # connection is not made in this file; confirm, or the browser leaks.
        self.driver.close()

    def parse(self, response):
        """Log in through the browser, then queue the stars page."""
        self.driver.get(response.url)
        time.sleep(4)  # crude wait for the login form to render
        username = self.driver.find_element_by_id("login_field")
        password = self.driver.find_element_by_name("password")
        # SECURITY: hard-coded credentials (see class docstring).
        username.send_keys("trujunzhang")
        password.send_keys("wanghao720")
        time.sleep(4)
        self.driver.find_element_by_xpath("//input[@name='commit']").click()
        time.sleep(4)
        yield scrapy.Request("https://github.com/stars", self.parse_stars)
        # return [FormRequest.from_response(response,
        #                                   formdata={'login': 'trujunzhang', 'password': 'wanghao'},
        #                                   callback=self.after_login)]

    def parse_stars(self, response):
        """Extract title/href/description for each starred repository."""
        self.driver.get(response.url)
        time.sleep(2)
        _repo_list = self.driver.find_element_by_id('js-repo-list')
        _li_list = _repo_list.find_elements_by_tag_name('li')
        # Loop variable renamed from `list`, which shadowed the builtin.
        for li in _li_list:
            _h3_ele = li.find_element_by_class_name('repo-list-name')
            _title = _h3_ele.text
            _href = _h3_ele.find_element_by_tag_name('a').get_attribute('href')
            _description = li.find_element_by_class_name('repo-list-description').text
            item = GithubItem(
                title=_title,
                href=_href,
                description=_description
            )
            yield item
25628b507eafd88f9c5f1685524f5654b6918292 | 60 | py | Python | src/spacel/provision/app/db/__init__.py | mycloudandme/spacel-provision | 900b8ada0017f727163c5c2ae464e17d747ba0e8 | [
"MIT"
] | 2 | 2016-05-18T11:10:27.000Z | 2016-05-18T13:25:04.000Z | src/spacel/provision/app/db/__init__.py | mycloudandme/spacel-provision | 900b8ada0017f727163c5c2ae464e17d747ba0e8 | [
"MIT"
] | null | null | null | src/spacel/provision/app/db/__init__.py | mycloudandme/spacel-provision | 900b8ada0017f727163c5c2ae464e17d747ba0e8 | [
"MIT"
] | null | null | null | from .cache import CacheFactory
from .rds import RdsFactory
| 20 | 31 | 0.833333 |
2689734c07d079150e4b295ad5ab24d6bf93bc44 | 4,418 | py | Python | dm_alchemy/types/helpers.py | locross93/dm_alchemy | 35449de51d56c427959ae6a3be13d6c6ab738be5 | [
"Apache-2.0"
] | 182 | 2021-02-08T15:25:06.000Z | 2022-03-31T00:46:23.000Z | dm_alchemy/types/helpers.py | locross93/dm_alchemy | 35449de51d56c427959ae6a3be13d6c6ab738be5 | [
"Apache-2.0"
] | 6 | 2021-02-12T10:42:51.000Z | 2022-03-14T23:59:45.000Z | dm_alchemy/types/helpers.py | locross93/dm_alchemy | 35449de51d56c427959ae6a3be13d6c6ab738be5 | [
"Apache-2.0"
] | 18 | 2021-02-08T20:37:22.000Z | 2022-03-15T20:54:14.000Z | # Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Helper functions and global variables for ideal observer."""
import math
from typing import List, Sequence
import numpy as np
# Sentinel used throughout this module for "value not known/observed"
# (compared against permutation entries in the partial_perm_* functions).
UNKNOWN = -1000
# Sentinel value; not referenced in this chunk.
# NOTE(review): presumably marks the end of a trial — confirm at use sites.
END_TRIAL = -2
def str_np_array_construct(a: np.ndarray) -> str:
  """Return Python source that reconstructs *a*, assuming ``import numpy as np``."""
  return 'np.{!r}'.format(a)
def perm_to_index(perm: Sequence[int], perm_index_to_index: np.ndarray) -> int:
  """Encodes a permutation as a dense integer index.

  The permutation's entries (each in [0, len(perm))) are first flattened with
  np.ravel_multi_index, treating them as coordinates in a len(perm)-dimensional
  cube; e.g. [0, 1, 2] -> 0*9 + 1*3 + 2 = 5.  The precomputed table
  perm_index_to_index then maps that flat value to an index in
  [0, len(perm)!).

  Args:
    perm: A permutation.
    perm_index_to_index: Table mapping flattened coordinates of valid
      permutations to dense indices in [0, len(perm)!).

  Returns:
    An integer representing the permutation.
  """
  num_axes = len(perm)
  cube_shape = (num_axes,) * num_axes
  flat_index = np.ravel_multi_index(tuple(perm), cube_shape)
  return perm_index_to_index[flat_index]
def perm_from_index(
    ind: int, num_elements, index_to_perm_index: np.ndarray) -> List[int]:
  """Decodes a dense integer index back into a permutation (inverse of perm_to_index).

  Args:
    ind: Dense permutation index.
    num_elements: Length of the permutation.
    index_to_perm_index: Table mapping dense indices back to flattened
      permutation coordinates.

  Returns:
    The permutation as a list of ints.
  """
  cube_shape = (num_elements,) * num_elements
  coords = np.unravel_index(index_to_perm_index[ind], cube_shape)
  return [int(coord) for coord in coords]
def partial_perm_to_index(
    partial_perm: Sequence[int], perm_index_to_index: np.ndarray) -> int:
  """Converts permutation of length 3 with potentially unknown values to an int.

  Args:
    partial_perm: Length-3 sequence whose entries are either values in [0, 3)
      or the UNKNOWN sentinel.
    perm_index_to_index: Table used when the permutation is fully known (see
      perm_to_index).

  Returns:
    A unique int: the dense permutation index when fully known; 3! when
    nothing is known; 3! + 1 + encode(position, value) of the single known
    element otherwise.

  Raises:
    ValueError: If partial_perm does not have length 3.
  """
  # We cannot have just 1 unknown value because knowing the others mean it is
  # determined. Therefore with a length 3 sequence we either have 0, 1 or 3
  # knowns.
  # To make this work for permutations of lengths other than 3 we would have to
  # consider all cases where the number of knowns is 0, 1, .... n - 2, n.
  # If the number of knowns is m there are m! ways to order them, n choose m
  # ways to select the known values and n choose m ways to place them in the
  # permutation. Since we only need to deal with permutations of length 3 we
  # just deal with that special case here.
  if len(partial_perm) != 3:
    raise ValueError('Function only deals with permutations of length 3.')
  # Track index of the first unknown entry, and position/value of the first
  # known entry (all initialized to the UNKNOWN sentinel).
  first_unknown = UNKNOWN
  first_known = UNKNOWN
  known_val = UNKNOWN
  for i, p in enumerate(partial_perm):
    if p == UNKNOWN:
      if first_unknown == UNKNOWN:
        first_unknown = i
    else:
      if first_known == UNKNOWN:
        first_known = i
        known_val = p
  # If we have 0 unknowns encode as normal.
  if first_unknown == UNKNOWN:
    return perm_to_index(partial_perm, perm_index_to_index)
  num_axes = len(partial_perm)
  num_simple_perms = math.factorial(num_axes)
  # If we have 0 knowns use the next value.
  if first_known == UNKNOWN:
    return num_simple_perms
  # If we have 2 unknowns then we can encode this using the position and value
  # of the first (and only) known element.
  return num_simple_perms + 1 + int(np.ravel_multi_index(
      (first_known, known_val), (num_axes, num_axes)))
def partial_perm_from_index(
    ind: int, num_elements: int, index_to_perm_index: np.ndarray
) -> List[int]:
  """Converts int to permutation of length 3 with potentially unknown values.

  Inverse of partial_perm_to_index.

  Args:
    ind: Encoded partial-permutation index.
    num_elements: Length of the permutation.
    index_to_perm_index: Table used when ind encodes a fully known
      permutation (see perm_from_index).

  Returns:
    A list of length num_elements, with the UNKNOWN sentinel for
    unobserved entries.
  """
  num_simple_perms = math.factorial(num_elements)
  # Indices below num_elements! encode fully known permutations.
  if ind < num_simple_perms:
    return perm_from_index(ind, num_elements, index_to_perm_index)
  none_known = [UNKNOWN for _ in range(num_elements)]
  # Exactly num_elements! encodes "nothing known".
  if ind == num_simple_perms:
    return none_known
  # Remaining indices encode the (position, value) of the single known entry.
  known_pos, known_val = np.unravel_index(
      ind - num_simple_perms - 1, (num_elements, num_elements)) # pylint: disable=unbalanced-tuple-unpacking
  none_known[known_pos] = int(known_val)
  return none_known
| 37.440678 | 109 | 0.714124 |
14231afcc422af9d209945d5157198c402ead5c5 | 1,667 | py | Python | examples/quantities_ex.py | melissawm/PyLaTeX-1 | 125611520b783293e925dc8770af5c3b39f2926f | [
"MIT"
] | null | null | null | examples/quantities_ex.py | melissawm/PyLaTeX-1 | 125611520b783293e925dc8770af5c3b39f2926f | [
"MIT"
] | null | null | null | examples/quantities_ex.py | melissawm/PyLaTeX-1 | 125611520b783293e925dc8770af5c3b39f2926f | [
"MIT"
] | 1 | 2018-09-27T01:02:55.000Z | 2018-09-27T01:02:55.000Z | #!/usr/bin/python
"""
This example shows quantities functionality.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
# begin-doc-include
import quantities as pq
from pylatex import Document, Section, Subsection, Math, Quantity
if __name__ == '__main__':
    # Build a one-section document exercising three Quantity use cases.
    doc = Document()
    section = Section('Quantity tests')
    subsection = Subsection('Scalars with units')
    G = pq.constants.Newtonian_constant_of_gravitation
    moon_earth_distance = 384400 * pq.km
    moon_mass = 7.34767309e22 * pq.kg
    earth_mass = 5.972e24 * pq.kg
    # Newton's law of gravitation: F = G * m1 * m2 / r^2
    moon_earth_force = G * moon_mass * earth_mass / moon_earth_distance**2
    q1 = Quantity(moon_earth_force.rescale(pq.newton),
                  options={'round-precision': 4, 'round-mode': 'figures'})
    # NOTE(review): `math` shadows the stdlib module name; harmless in this
    # short script, but rename if it grows.
    math = Math(data=['F=', q1])
    subsection.append(math)
    section.append(subsection)
    subsection = Subsection('Scalars without units')
    world_population = 7400219037
    N = Quantity(world_population, options={'round-precision': 2,
                                            'round-mode': 'figures'},
                 format_cb="{0:23.17e}".format)
    subsection.append(Math(data=['N=', N]))
    section.append(subsection)
    subsection = Subsection('Scalars with uncertainties')
    width = pq.UncertainQuantity(7.0, pq.meter, .4)
    length = pq.UncertainQuantity(6.0, pq.meter, .3)
    area = Quantity(width*length, options='separate-uncertainty',
                    format_cb=lambda x: "{0:.1f}".format(float(x)))
    subsection.append(Math(data=['A=', area]))
    section.append(subsection)
    doc.append(section)
    # Render the document to quantities_ex.pdf
    doc.generate_pdf('quantities_ex')
f8e96340c34e18f0e214b3a91bc8c732a103a080 | 16,172 | py | Python | qa/rpc-tests/p2p-fullblocktest.py | CollegicoinCLG/collegicoin-old | a82417e181df0798d9186aae458dbc3ff87f6de2 | [
"MIT"
] | 1 | 2019-12-21T23:25:47.000Z | 2019-12-21T23:25:47.000Z | qa/rpc-tests/p2p-fullblocktest.py | CollegicoinCLG/collegicoin-old | a82417e181df0798d9186aae458dbc3ff87f6de2 | [
"MIT"
] | null | null | null | qa/rpc-tests/p2p-fullblocktest.py | CollegicoinCLG/collegicoin-old | a82417e181df0798d9186aae458dbc3ff87f6de2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
from test_framework.script import CScript, SignatureHash, SIGHASH_ALL, OP_TRUE, OP_FALSE
class PreviousSpendableOutput(object):
    """Reference to a spendable output: a transaction plus an output index."""

    def __init__(self, tx=None, n=-1):
        # BUGFIX: the original default was `tx=CTransaction()`, a mutable
        # default shared by every call; create a fresh instance per call.
        self.tx = CTransaction() if tx is None else tx
        self.n = n  # the output we're spending
'''
This reimplements tests from the collegicoinj/FullBlockTestGenerator used
by the pull-tester.
We use the testing framework in which we expect a particular answer from
each test.
'''
class FullBlockTest(ComparisonTestFramework):
''' Can either run this test as 1 node with expected answers, or two and compare them.
Change the "outcome" variable from each TestInstance object to only do the comparison. '''
    def __init__(self):
        """Set up single-node comparison state and the coinbase signing key."""
        self.num_nodes = 1
        # block sha256 -> chain height (genesis is seeded in get_tests)
        self.block_heights = {}
        # Key used to sign coinbase spends created by next_block
        self.coinbase_key = CECKey()
        self.coinbase_key.set_secretbytes(b"horsebattery")
        self.coinbase_pubkey = self.coinbase_key.get_pubkey()
        # Timestamp for the next created block; bumped once per block
        self.block_time = int(time.time())+1
        # Current chain tip, and number -> block registry for revisiting forks
        self.tip = None
        self.blocks = {}
    def run_test(self):
        """Drive the comparison TestManager over the scenarios yielded by get_tests()."""
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start() # Start up network handling in another thread
        sync_masternodes(self.nodes)
        test.run()
def add_transactions_to_block(self, block, tx_list):
[ tx.rehash() for tx in tx_list ]
block.vtx.extend(tx_list)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
return block
    # Create a block on top of self.tip, and advance self.tip to point to the new block
    # if spend is specified, then 1 satoshi will be spent from that to an anyone-can-spend output,
    # and rest will go to fees.
    def next_block(self, number, spend=None, additional_coinbase_value=0, script=None):
        """Create, solve and register block `number` on top of self.tip.

        Args:
            number: key under which the new block is stored in self.blocks.
            spend: optional PreviousSpendableOutput consumed by a spend tx;
                all but one satoshi of its value is added to the coinbase as fees.
            additional_coinbase_value: extra coinbase value, used to build
                deliberately invalid (too-much-fee) blocks.
            script: scriptPubKey for the spend tx's paying output
                (defaults to anyone-can-spend OP_TRUE).
        """
        if self.tip == None:
            base_block_hash = self.genesis_hash
        else:
            base_block_hash = self.tip.sha256
        # First create the coinbase
        height = self.block_heights[base_block_hash] + 1
        coinbase = create_coinbase(height, self.coinbase_pubkey)
        coinbase.vout[0].nValue += additional_coinbase_value
        if (spend != None):
            coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
        coinbase.rehash()
        block = create_block(base_block_hash, coinbase, self.block_time)
        if (spend != None):
            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(spend.tx.sha256, spend.n), b"", 0xffffffff)) # no signature yet
            # This copies the java comparison tool testing behavior: the first
            # txout has a garbage scriptPubKey, "to make sure we're not
            # pre-verifying too much" (?)
            tx.vout.append(CTxOut(0, CScript([random.randint(0,255), height & 255])))
            if script == None:
                tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
            else:
                tx.vout.append(CTxOut(1, script))
            # Now sign it if necessary
            scriptSig = b""
            scriptPubKey = bytearray(spend.tx.vout[spend.n].scriptPubKey)
            if (scriptPubKey[0] == OP_TRUE): # looks like an anyone-can-spend
                scriptSig = CScript([OP_TRUE])
            else:
                # We have to actually sign it
                (sighash, err) = SignatureHash(spend.tx.vout[spend.n].scriptPubKey, tx, 0, SIGHASH_ALL)
                scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
            tx.vin[0].scriptSig = scriptSig
            # Now add the transaction to the block
            block = self.add_transactions_to_block(block, [tx])
        block.solve()
        self.tip = block
        self.block_heights[block.sha256] = height
        self.block_time += 1
        assert number not in self.blocks
        self.blocks[number] = block
        return block
    def get_tests(self):
        """Yield TestInstances exercising the full-block acceptance matrix.

        Builds a chain via next_block/update_block covering forks, double
        spends, coinbase maturity/size rules, sigop limits and block-size
        limits, yielding accepted()/rejected() expectations for the
        comparison tool.
        """
        self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
        self.block_heights[self.genesis_hash] = 0
        spendable_outputs = []
        # save the current tip so it can be spent by a later block
        def save_spendable_output():
            spendable_outputs.append(self.tip)
        # get an output that we previously marked as spendable
        def get_spendable_output():
            return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
        # returns a test case that asserts that the current tip was accepted
        def accepted():
            return TestInstance([[self.tip, True]])
        # returns a test case that asserts that the current tip was rejected
        def rejected(reject = None):
            if reject is None:
                return TestInstance([[self.tip, False]])
            else:
                return TestInstance([[self.tip, reject]])
        # move the tip back to a previous block
        def tip(number):
            self.tip = self.blocks[number]
        # add transactions to a block produced by next_block
        def update_block(block_number, new_transactions):
            block = self.blocks[block_number]
            old_hash = block.sha256
            self.add_transactions_to_block(block, new_transactions)
            block.solve()
            # Update the internal state just like in next_block
            self.tip = block
            self.block_heights[block.sha256] = self.block_heights[old_hash]
            del self.block_heights[old_hash]
            self.blocks[block_number] = block
            return block
        # creates a new block and advances the tip to that block
        block = self.next_block
        # Create a new block
        block(0)
        save_spendable_output()
        yield accepted()
        # Now we need that block to mature so we can spend the coinbase.
        test = TestInstance(sync_every_block=False)
        for i in range(99):
            block(1000 + i)
            test.blocks_and_transactions.append([self.tip, True])
            save_spendable_output()
        yield test
        # Start by building a couple of blocks on top (which output is spent is
        # in parentheses):
        #     genesis -> b1 (0) -> b2 (1)
        out0 = get_spendable_output()
        block(1, spend=out0)
        save_spendable_output()
        yield accepted()
        out1 = get_spendable_output()
        b2 = block(2, spend=out1)
        yield accepted()
        # so fork like this:
        #
        # genesis -> b1 (0) -> b2 (1)
        #                  \-> b3 (1)
        #
        # Nothing should happen at this point. We saw b2 first so it takes priority.
        tip(1)
        b3 = block(3, spend=out1)
        txout_b3 = PreviousSpendableOutput(b3.vtx[1], 1)
        yield rejected()
        # Now we add another block to make the alternative chain longer.
        #
        # genesis -> b1 (0) -> b2 (1)
        #                  \-> b3 (1) -> b4 (2)
        out2 = get_spendable_output()
        block(4, spend=out2)
        yield accepted()
        # ... and back to the first chain.
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                  \-> b3 (1) -> b4 (2)
        tip(2)
        block(5, spend=out2)
        save_spendable_output()
        yield rejected()
        out3 = get_spendable_output()
        block(6, spend=out3)
        yield accepted()
        # Try to create a fork that double-spends
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                              \-> b7 (2) -> b8 (4)
        #                  \-> b3 (1) -> b4 (2)
        tip(5)
        block(7, spend=out2)
        yield rejected()
        out4 = get_spendable_output()
        block(8, spend=out4)
        yield rejected()
        # Try to create a block that has too much fee
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                                        \-> b9 (4)
        #                  \-> b3 (1) -> b4 (2)
        tip(6)
        block(9, spend=out4, additional_coinbase_value=1)
        yield rejected(RejectResult(16, b'bad-cb-amount'))
        # Create a fork that ends in a block with too much fee (the one that causes the reorg)
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                              \-> b10 (3) -> b11 (4)
        #                  \-> b3 (1) -> b4 (2)
        tip(5)
        block(10, spend=out3)
        yield rejected()
        block(11, spend=out4, additional_coinbase_value=1)
        yield rejected(RejectResult(16, b'bad-cb-amount'))
        # Try again, but with a valid fork first
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                              \-> b12 (3) -> b13 (4) -> b14 (5)
        #                                 (b12 added last)
        #                  \-> b3 (1) -> b4 (2)
        tip(5)
        b12 = block(12, spend=out3)
        save_spendable_output()
        #yield TestInstance([[b12, False]])
        b13 = block(13, spend=out4)
        # Deliver the block header for b12, and the block b13.
        # b13 should be accepted but the tip won't advance until b12 is delivered.
        yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
        save_spendable_output()
        out5 = get_spendable_output()
        # b14 is invalid, but the node won't know that until it tries to connect
        # Tip still can't advance because b12 is missing
        block(14, spend=out5, additional_coinbase_value=1)
        yield rejected()
        yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
        # Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                              \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
        #                  \-> b3 (1) -> b4 (2)
        # Test that a block with a lot of checksigs is okay
        lots_of_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50 - 1))
        tip(13)
        block(15, spend=out5, script=lots_of_checksigs)
        yield accepted()
        # Test that a block with too many checksigs is rejected
        out6 = get_spendable_output()
        too_many_checksigs = CScript([OP_CHECKSIG] * (1000000 // 50))
        block(16, spend=out6, script=too_many_checksigs)
        yield rejected(RejectResult(16, b'bad-blk-sigops'))
        # Attempt to spend a transaction created on a different fork
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                              \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
        #                  \-> b3 (1) -> b4 (2)
        tip(15)
        block(17, spend=txout_b3)
        yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
        # Attempt to spend a transaction created on a different fork (on a fork this time)
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                              \-> b12 (3) -> b13 (4) -> b15 (5)
        #                                          \-> b18 (b3.vtx[1]) -> b19 (6)
        #                  \-> b3 (1) -> b4 (2)
        tip(13)
        block(18, spend=txout_b3)
        yield rejected()
        block(19, spend=out6)
        yield rejected()
        # Attempt to spend a coinbase at depth too low
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                              \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
        #                  \-> b3 (1) -> b4 (2)
        tip(15)
        out7 = get_spendable_output()
        block(20, spend=out7)
        yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))
        # Attempt to spend a coinbase at depth too low (on a fork this time)
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                              \-> b12 (3) -> b13 (4) -> b15 (5)
        #                                          \-> b21 (6) -> b22 (5)
        #                  \-> b3 (1) -> b4 (2)
        tip(13)
        block(21, spend=out6)
        yield rejected()
        block(22, spend=out5)
        yield rejected()
        # Create a block on either side of MAX_BLOCK_SIZE and make sure its accepted/rejected
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                              \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
        #                                                                \-> b24 (6) -> b25 (7)
        #                  \-> b3 (1) -> b4 (2)
        tip(15)
        b23 = block(23, spend=out6)
        old_hash = b23.sha256
        tx = CTransaction()
        script_length = MAX_BLOCK_SIZE - len(b23.serialize()) - 69
        script_output = CScript([b'\x00' * script_length])
        tx.vout.append(CTxOut(0, script_output))
        tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 1)))
        b23 = update_block(23, [tx])
        # Make sure the math above worked out to produce a max-sized block
        assert_equal(len(b23.serialize()), MAX_BLOCK_SIZE)
        yield accepted()
        # Make the next block one byte bigger and check that it fails
        tip(15)
        b24 = block(24, spend=out6)
        script_length = MAX_BLOCK_SIZE - len(b24.serialize()) - 69
        script_output = CScript([b'\x00' * (script_length+1)])
        tx.vout = [CTxOut(0, script_output)]
        b24 = update_block(24, [tx])
        assert_equal(len(b24.serialize()), MAX_BLOCK_SIZE+1)
        yield rejected(RejectResult(16, b'bad-blk-length'))
        b25 = block(25, spend=out7)
        yield rejected()
        # Create blocks with a coinbase input script size out of range
        # genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
        #                              \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
        #                                                                \-> ... (6) -> ... (7)
        #                  \-> b3 (1) -> b4 (2)
        tip(15)
        b26 = block(26, spend=out6)
        b26.vtx[0].vin[0].scriptSig = b'\x00'
        b26.vtx[0].rehash()
        # update_block causes the merkle root to get updated, even with no new
        # transactions, and updates the required state.
        b26 = update_block(26, [])
        yield rejected(RejectResult(16, b'bad-cb-length'))
        # Extend the b26 chain to make sure collegicoind isn't accepting b26
        b27 = block(27, spend=out7)
        yield rejected()
        # Now try a too-large-coinbase script
        tip(15)
        b28 = block(28, spend=out6)
        b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
        b28.vtx[0].rehash()
        b28 = update_block(28, [])
        yield rejected(RejectResult(16, b'bad-cb-length'))
        # Extend the b28 chain to make sure collegicoind isn't accepting b28
        b29 = block(29, spend=out7)
        # TODO: Should get a reject message back with "bad-prevblk", except
        # there's a bug that prevents this from being detected.  Just note
        # failure for now, and add the reject result later.
        yield rejected()
        # b30 has a max-sized coinbase scriptSig.
        tip(23)
        b30 = block(30)
        b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
        b30.vtx[0].rehash()
        b30 = update_block(30, [])
        yield accepted()
77af8f947adf75bf371079eeae4b841a80d12028 | 1,876 | py | Python | ngcasa/deconvolution/deconvolve_multiterm_clean.py | wxiongccnu1990/cngi_prototype | 7a7230485acc9f8f2be534a832522339153d521e | [
"Apache-2.0"
] | null | null | null | ngcasa/deconvolution/deconvolve_multiterm_clean.py | wxiongccnu1990/cngi_prototype | 7a7230485acc9f8f2be534a832522339153d521e | [
"Apache-2.0"
] | null | null | null | ngcasa/deconvolution/deconvolve_multiterm_clean.py | wxiongccnu1990/cngi_prototype | 7a7230485acc9f8f2be534a832522339153d521e | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 AUI, Inc. Washington DC, USA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def deconvolve_multiterm_clean(img_dataset, deconvolve_parms, storage_parms):
    """
    .. todo::
        This function is not yet implemented

    An iterative solver to construct a model from an observed image(set) and psf(set).

    Sky model - A (multi-term) linear combination of basis functions.
    Multi-scale : Basis functions are inverted tapered paraboloids
    Multi-scale MFS : Basis functions are Taylor polynomials in frequency

    Options :
    - MS-Clean - Multi-scale CLEAN ( MS-MFS Clean with nterms=1 )
        - Input - Requires an input cube (mfs is a cube with nchan=1)
        - Output - Cube model image
    - MS-MFS Clean - Wideband Imaging that solves for a set of Taylor coefficient maps.
        - Input - Multi-channel cube.
        - Output : Taylor coefficient maps, Spectral Index + Evaluation of the model to a Cube model image

    Step (1) cngi.image.cube_to_mfs()
    Step (2) Implement the multi-term deconvolution algorithm
    Step (3) cngi.image.mfs_to_cube()

    The special case of nscales=1 and nterms=1 is the same use-case as deconvolve_point_clean.

    Parameters
    ----------
    img_dataset : xarray.core.dataset.Dataset
        Input image dataset holding the observed image(s) and psf(s).
    deconvolve_parms : dict
        Deconvolution control parameters (scales, nterms, gain, threshold, ...).
    storage_parms : dict
        Parameters controlling how/where the result is stored.

    Returns
    -------
    img_dataset : xarray.core.dataset.Dataset

    Raises
    ------
    NotImplementedError
        Always, until the algorithm is implemented.
    """
    # Fail loudly instead of silently returning None from an unimplemented stub.
    raise NotImplementedError(
        "deconvolve_multiterm_clean is not yet implemented")
| 36.784314 | 106 | 0.678038 |
06248a71896a187b4c6ae43ae63535ba8d309551 | 1,252 | py | Python | gaze_estimation/gaze_estimator/head_pose_estimation/face_landmark_estimator.py | hysts/pytorch_mpiigaze | 34d7d2fd9102688bc7a523a6f1d9542f8b934aa8 | [
"MIT"
] | 240 | 2018-01-24T19:43:27.000Z | 2022-03-29T17:46:54.000Z | gaze_estimation/gaze_estimator/head_pose_estimation/face_landmark_estimator.py | hysts/pytorch_mpiigaze | 34d7d2fd9102688bc7a523a6f1d9542f8b934aa8 | [
"MIT"
] | 54 | 2018-01-20T23:25:55.000Z | 2022-02-26T06:34:36.000Z | gaze_estimation/gaze_estimator/head_pose_estimation/face_landmark_estimator.py | hysts/pytorch_mpiigaze | 34d7d2fd9102688bc7a523a6f1d9542f8b934aa8 | [
"MIT"
] | 74 | 2018-01-20T20:51:20.000Z | 2022-03-07T10:50:13.000Z | from typing import List
import dlib
import numpy as np
import yacs.config
from ..common import Face
class LandmarkEstimator:
    """Detect faces and their facial landmarks using dlib.

    Only the ``'dlib'`` face-detector mode is supported; any other value of
    ``config.face_detector.mode`` raises ``ValueError``.
    """

    def __init__(self, config: yacs.config.CfgNode):
        self.mode = config.face_detector.mode
        if self.mode == 'dlib':
            # HOG-based frontal face detector plus a pre-trained shape
            # predictor model for the landmark positions.
            self.detector = dlib.get_frontal_face_detector()
            self.predictor = dlib.shape_predictor(
                config.face_detector.dlib.model)
        else:
            raise ValueError

    def detect_faces(self, image: np.ndarray) -> List[Face]:
        """Return a list of ``Face`` objects detected in *image*."""
        if self.mode == 'dlib':
            return self._detect_faces_dlib(image)
        else:
            raise ValueError

    def _detect_faces_dlib(self, image: np.ndarray) -> List[Face]:
        # dlib expects RGB; image is presumably BGR (OpenCV convention),
        # hence the channel reversal -- TODO confirm with callers.
        bboxes = self.detector(image[:, :, ::-1], 0)
        detected = []
        for bbox in bboxes:
            predictions = self.predictor(image[:, :, ::-1], bbox)
            # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in
            # 1.24; it was an alias for the builtin float, so use that.
            landmarks = np.array([(pt.x, pt.y) for pt in predictions.parts()],
                                 dtype=float)
            bbox = np.array([[bbox.left(), bbox.top()],
                             [bbox.right(), bbox.bottom()]],
                            dtype=float)
            detected.append(Face(bbox, landmarks))
        return detected
| 32.947368 | 78 | 0.559904 |
6825f6b3c50a037c935fc02116bad3a0da894d7a | 13,829 | py | Python | toolbox/db/write/rebuild.py | Alexd14/toolbox | cf834994352d2d997124e69bc7ff43bcd6ac145f | [
"MIT"
] | null | null | null | toolbox/db/write/rebuild.py | Alexd14/toolbox | cf834994352d2d997124e69bc7ff43bcd6ac145f | [
"MIT"
] | null | null | null | toolbox/db/write/rebuild.py | Alexd14/toolbox | cf834994352d2d997124e69bc7ff43bcd6ac145f | [
"MIT"
] | null | null | null | from toolbox.db.read.etf_universe import clear_cache
from toolbox.db.write.create_tables import IngestDataBase
from toolbox.db.write.make_universes import compustat_us_universe, crsp_us_universe
def rebuild_db(drop: bool = False):
    """
    code to rebuild the database from scratch

    Ingests the raw WRDS/CRSP/Compustat/IBES dumps listed in ``tbls`` into
    their schemas, then (re)builds the CRSP and Compustat market-cap
    universes and finally clears the cached ETF universes.

    :param drop: should we drop the current tables

    Each entry of ``tbls`` is a config dict consumed by
    ``IngestDataBase.ingest``:
      - table / schema:     destination table name and schema
      - file_path:          source csv/gz dump on disk
      - rename:             column renames applied on load
      - alter_type:         columns cast after load (type, strptime format)
      - index:              indexes created on the destination table
      - custom:             raw SQL executed after the load
      - where:              optional row filter applied on load
      - rows_to_interpret:  sample size used for csv type inference
    """
    tbls = [
        #
        # CRSP Compustat Link
        #
        {
            'table': 'crsp_cstat_link',
            'schema': 'ccm',
            'file_path': '/Users/alex/Desktop/WRDS/CRSP/Annual Update/CRSP:Compustst Merged/Compustat CRSP Link/rc9ie3efp9e3opdf.csv',
            'custom': """
                UPDATE ccm.crsp_cstat_link SET LINKENDDT=99991231 WHERE LINKENDDT = 'E';
                    """,
            'alter_type': {'LINKDT': ['timestamp', '%Y%m%d'],
                           'LINKENDDT': ['timestamp', '%Y%m%d'],
                           'dldte': ['timestamp', '%Y%m%d']},
            'index': [{'name': 'ccm_link_lpermno_idx', 'column': 'lpermno'},
                      {'name': 'ccm_link_gvkey_idx', 'column': 'gvkey'},
                      {'name': 'ccm_link_liid_idx', 'column': 'liid'}]
        },
        #
        # CRSP
        #
        {
            'rows_to_interpret': 2_000_000,
            'table': 'security_daily',
            'schema': 'crsp',
            'file_path': '/Users/alex/Desktop/WRDS/CRSP/Annual Update/Stock : Security Files/Daily Stock File/ndrekzi6lud82dpo.csv',
            'rename': {},
            'alter_type': {'date': ['timestamp', '%Y%m%d'],
                           'nameendt': ['timestamp', '%Y%m%d'],
                           'shrenddt': ['timestamp', '%Y%m%d'],
                           'nextdt': ['timestamp', '%Y%m%d'],
                           'dlpdt': ['timestamp', '%Y%m%d'],
                           'dclrdt': ['timestamp', '%Y%m%d'],
                           'rcrddt': ['timestamp', '%Y%m%d'],
                           'paydt': ['timestamp', '%Y%m%d']},
            'index': [{'name': 'crsp_sd_date_idx', 'column': 'date'},
                      {'name': 'crsp_sd_permno_idx', 'column': 'permno'}]
        },
        {
            'table': 'stock_header_info',
            'schema': 'crsp',
            'file_path': '/Users/alex/Desktop/WRDS/CRSP/Annual Update/Stock : Security Files/Stock Header Info/2xzpc1ww0dih4jk0.csv',
            'custom': """
                 UPDATE crsp.stock_header_info SET begvol=NULL WHERE begvol = 'Z';
                 UPDATE crsp.stock_header_info SET endvol=NULL WHERE endvol = 'Z';
                 """,
            'alter_type': {'begdat': ['timestamp', '%Y%m%d'],
                           'enddat': ['timestamp', '%Y%m%d'],
                           'begprc': ['timestamp', '%Y%m%d'],
                           'endprc': ['timestamp', '%Y%m%d'],
                           'begvol': ['timestamp', '%Y%m%d'],
                           'endvol': ['timestamp', '%Y%m%d'],
                           'begret': ['timestamp', '%Y%m%d'],
                           'endret': ['timestamp', '%Y%m%d']},
            'rename': {},
            'index': [{'name': 'crsp_sh_permno_idx', 'column': 'permno'}]
        },
        {
            'table': 'distributions',
            'schema': 'crsp',
            'file_path': '/Users/alex/Desktop/WRDS/CRSP/Annual Update/Stock : Events/Distribution/y0vypfj8geyzhs0l.csv',
            'alter_type': {'dclrdt': ['timestamp', '%Y%m%d'],
                           'rcrddt': ['timestamp', '%Y%m%d'],
                           'paydt': ['timestamp', '%Y%m%d'],
                           'exdt': ['timestamp', '%Y%m%d']},
            'rename': {},
            'index': [{'name': 'crsp_dist_permno_idx', 'column': 'permno'}]
        },
        { # this has non valid UTF8 so had to adjust file
            'rows_to_interpret': 500_000,
            'table': 'fund_summary',
            'schema': 'crsp',
            'file_path': '/Users/alex/Desktop/WRDS/CRSP/Quarterly Update/Mutual Fund/Fund Summary/Fund Summary 196912-202109.gz',
            'rename': {'caldt': 'date'},
            # 'where': """ et_flag = 'F' """,
            'alter_type': {'date': ['timestamp', '%Y%m%d'],
                           'first_offer_dt': ['timestamp', '%Y%m%d'],
                           'mgr_dt': ['timestamp', '%Y%m%d'],
                           'end_dt': ['timestamp', '%Y%m%d'],
                           'nav_latest_dt': ['timestamp', '%Y%m%d'],
                           'nav_52w_h_dt': ['timestamp', '%Y%m%d'],
                           'nav_52w_l_dt': ['timestamp', '%Y%m%d'],
                           'unrealized_app_dt': ['timestamp', '%Y%m%d'],
                           'maturity_dt': ['timestamp', '%Y%m%d'],
                           'fiscal_yearend': ['timestamp', '%Y%m%d']},
            'index': [{'name': 'crsp_mffs_date_idx', 'column': 'date'},
                      {'name': 'crsp_mffs_crsp_portno_idx', 'column': 'crsp_portno'},
                      {'name': 'crsp_mffs_ticker_idx', 'column': 'ticker'}]
        },
        {
            'rows_to_interpret': 2_000_000,
            'table': 'portfolio_holdings',
            'schema': 'crsp',
            'file_path': '/Users/alex/Desktop/WRDS/CRSP/Quarterly Update/Mutual Fund/Portfolio Holdings/Portfolio Holdings 200101-202109.gz',
            # 'where': """ crsp_portno in (SELECT distinct crsp_portno FROM crsp.fund_summary) """,
            'rename': {'report_dt': 'date'},
            'alter_type': {'date': ['timestamp', '%Y%m%d'],
                           'eff_dt': ['timestamp', '%Y%m%d'],
                           'maturity_dt': ['timestamp', '%Y%m%d']},
            'index': [{'name': 'crsp_mfph_date_idx', 'column': 'date'},
                      {'name': 'crsp_mfph_crsp_portno_idx', 'column': 'crsp_portno'},
                      {'name': 'crsp_mfph_permno_idx', 'column': 'permno'}]
        },
        #
        # Compustat
        #
        {
            'table': 'fundamental_annual',
            'schema': 'cstat',
            'file_path': '/Users/alex/Desktop/WRDS/Compustat - Capital IQ/Compustat/North America/Fundementals Annual/gkm6i8iuxd46uuw1.csv',
            'rename': {'pdate': 'date'},
            # NOTE(review): 'IPODATE ' below contains a trailing space in the
            # key -- verify it matches the source column name exactly.
            'alter_type': {'date': ['timestamp', '%Y%m%d'],
                           'DLDTE': ['timestamp', '%Y%m%d'],
                           'IPODATE ': ['timestamp', '%Y%m%d'],
                           'APDEDATE': ['timestamp', '%Y%m%d'],
                           'FDATE': ['timestamp', '%Y%m%d']},
            'index': [{'name': 'cstat_fa_date_idx', 'column': 'date'},
                      {'name': 'cstat_fa_gvkey_idx', 'column': 'gvkey'}]
        },
        {
            'rows_to_interpret': 500_000,
            'table': 'security_daily',
            'schema': 'cstat',
            'file_path': '/Users/alex/Desktop/WRDS/Compustat - Capital IQ/Compustat/North America/Security Daily/Security Daily 19831231-2021228.csv.gz',
            'custom': """
                ALTER TABLE cstat.security_daily ADD COLUMN id VARCHAR;
                ALTER TABLE cstat.security_daily ALTER id SET DATA TYPE VARCHAR USING CONCAT(gvkey, '_', iid);
                ALTER TABLE cstat.security_daily DROP BUSDESC;
                """,
            'rename': {'datadate': 'date'},
            'alter_type': {'DATE': ['timestamp', '%Y%m%d'],
                           'DLDTE': ['timestamp', '%Y%m%d'],
                           'IPODATE': ['timestamp', '%Y%m%d'],
                           'ANNCDATE': ['timestamp', '%Y%m%d'],
                           'CAPGNPAYDATE': ['timestamp', '%Y%m%d'],
                           'CHEQVPAYDATE': ['timestamp', '%Y%m%d'],
                           'DIVDPAYDATE': ['timestamp', '%Y%m%d'],
                           'DIVSPPAYDATE': ['timestamp', '%Y%m%d'],
                           'PAYDATE': ['timestamp', '%Y%m%d'],
                           'RECORDDATE': ['timestamp', '%Y%m%d']
                           },
            'index': [{'name': 'cstat_sd_date_idx', 'column': 'date'},
                      {'name': 'cstat_sd_gvkey_idx', 'column': 'gvkey'},
                      {'name': 'cstat_sd_iid_idx', 'column': 'iid'},
                      {'name': 'cstat_sd_id_idx', 'column': 'id'}]
        },
        #
        # WRDS
        #
        {
            'rows_to_interpret': 50_000,
            'schema': 'wrds',
            'table': 'firm_ratios',
            'file_path': '/Users/alex/Desktop/WRDS/Finical Ratio Suite by WRDS/Finanical Ratios /IBES Financial Ratios By Firm Level WRDS/Financial Ratios IBES 19700131-20210102.gz',
            'rename': {'public_date': 'date'},
            'alter_type': {'adate': ['timestamp', '%Y%m%d'],
                           'qdate': ['timestamp', '%Y%m%d'],
                           'date': ['timestamp', '%Y%m%d']},
            'index':
                [{'name': 'wrds_firm_date_idx', 'column': 'date'},
                 {'name': 'wrds_firm_permno_idx', 'column': 'permno'},
                 {'name': 'wrds_firm_gvkey_idx', 'column': 'gvkey'}]
        },
        {
            'rows_to_interpret': 50_000,
            'schema': 'wrds',
            'table': 'subsidiary',
            'file_path': '/Users/alex/Desktop/WRDS/Subsidary Data By WRDS/Company Subsidiaries/WRDS Company Subidiary Data (Beta) 199312-202004.gz',
            'rename': {'SECPDATE': 'date'},
            'alter_type': {'FDATE': ['timestamp', '%Y%m%d'],
                           'RDATE': ['timestamp', '%Y%m%d'],
                           'date': ['timestamp', '%Y%m%d']},
            'index':
                [{'name': 'wrds_sub_date_idx', 'column': 'date'},
                 {'name': 'wrds_sub_gvkey_idx', 'column': 'gvkey'}]
        },
        #
        # IBES
        #
        {
            'rows_to_interpret': 100,
            'schema': 'ibes',
            'table': 'crsp_ibes_link',
            'file_path': '/Users/alex/Desktop/WRDS/IBES/IBES CRSP Link/luhmjdovofexjxwg.csv',
            'alter_type': {'sdate': ['timestamp', '%Y%m%d'],
                           'edate': ['timestamp', '%Y%m%d']},
            'index':
                [{'name': 'crsp_ibes_permno_idx', 'column': 'permno'},
                 {'name': 'crsp_ibes_ticker_idx', 'column': 'ticker'},
                 {'name': 'crsp_ibes_sdate_idx', 'column': 'sdate'},
                 {'name': 'crsp_ibes_edate_idx', 'column': 'edate'}]
        },
        {
            'rows_to_interpret': 5000,
            'table': 'summary_price_target',
            'schema': 'ibes',
            'file_path': '/Users/alex/Desktop/WRDS/IBES/IBES Academic/Summary History/Price Target/lyrvpqbb4tg2lbv0.csv',
            'rename': {'STATPERS': 'date'},
            'alter_type': {'DATE': ['timestamp', '%Y%m%d']},
            'index': [{'name': 'ibes_spt_date_idx', 'column': 'date'},
                      {'name': 'ibes_spt_ticker_idx', 'column': 'ticker'},
                      {'name': 'ibes_spt_usfirm_idx', 'column': 'usfirm'},
                      {'name': 'ibes_spt_curr_idx', 'column': 'curr'}]
        },
        {
            'rows_to_interpret': 50000,
            'table': 'summary_statistics',
            'schema': 'ibes',
            'file_path': '/Users/alex/Desktop/WRDS/IBES/IBES Academic/Summary History/Summary Statistics/1fhrqwpqzncqbdp3.csv',
            'rename': {'STATPERS': 'date'},
            'alter_type': {'DATE': ['timestamp', '%Y%m%d'],
                           'ANNDATS_ACT': ['timestamp', '%Y%m%d'],
                           'FPEDATS': ['timestamp', '%Y%m%d']},
            'index': [{'name': 'ibes_ss_date_idx', 'column': 'date'},
                      {'name': 'ibes_ss_ticker_idx', 'column': 'ticker'},
                      {'name': 'ibes_ss_usfirm_idx', 'column': 'usfirm'},
                      {'name': 'ibes_ss_currcode_idx', 'column': 'curcode'},
                      {'name': 'ibes_ss_measure_idx', 'column': 'measure'}]
        },
        {
            'rows_to_interpret': 50000,
            'table': 'detail',
            'schema': 'ibes',
            'file_path': '/Users/alex/Desktop/WRDS/IBES/IBES Academic/Detail History/Detail File WIth Actuals/Detail File With Actuals 197001-202108.csv.gz',
            'rename': {'ACTDATS': 'date'},
            'alter_type': {'DATE': ['timestamp', '%Y%m%d'],
                           'FPEDATS': ['timestamp', '%Y%m%d'],
                           'REVDATS': ['timestamp', '%Y%m%d'],
                           'ANNDATS': ['timestamp', '%Y%m%d'],
                           'ANNDATS_ACT': ['timestamp', '%Y%m%d'],
                           'ACTDATS_ACT': ['timestamp', '%Y%m%d']},
            'index': [{'name': 'ibes_ss_date_idx', 'column': 'date'},
                      {'name': 'ibes_ss_ticker_idx', 'column': 'ticker'},
                      {'name': 'ibes_ss_usfirm_idx', 'column': 'usfirm'},
                      {'name': 'ibes_ss_measure_idx', 'column': 'measure'},
                      {'name': 'ibes_ss_fpi_idx', 'column': 'fpi'}]
        }
    ]
    # building the tables; ingest in chunks of 3 so the temp area used by the
    # loader doesn't balloon in size
    for inner_tbl_list in [tbls[i:i + 3] for i in range(0, len(tbls), 3)]:
        IngestDataBase().ingest(inner_tbl_list, drop, rows_to_interpret=2000)
    # building crsp universes
    crsp_us_universe(max_rank=500, rebuild_mc_ranking=True)
    crsp_us_universe(max_rank=1000)
    crsp_us_universe(max_rank=3000)
    crsp_us_universe(min_rank=1000, max_rank=3000)
    # building compustat universes
    compustat_us_universe(max_rank=500, rebuild_mc_ranking=True)
    compustat_us_universe(max_rank=1000)
    compustat_us_universe(max_rank=3000)
    compustat_us_universe(min_rank=1000, max_rank=3000)
    # clearing the etf universe cache
    clear_cache()
| 50.105072 | 182 | 0.472051 |
46cc08f8ebe5f9e9ae77869e33fe70e8b7670cac | 1,008 | py | Python | tests/data23/recipe-414879.py | JohannesBuchner/pystrict3 | f442a89ac6a23f4323daed8ef829d8e9e1197f90 | [
"BSD-2-Clause"
] | 1 | 2020-06-05T08:53:26.000Z | 2020-06-05T08:53:26.000Z | tests/data23/recipe-414879.py | JohannesBuchner/pystrict3 | f442a89ac6a23f4323daed8ef829d8e9e1197f90 | [
"BSD-2-Clause"
] | 1 | 2020-06-04T13:47:19.000Z | 2020-06-04T13:47:57.000Z | tests/data23/recipe-414879.py | JohannesBuchner/pystrict3 | f442a89ac6a23f4323daed8ef829d8e9e1197f90 | [
"BSD-2-Clause"
] | 1 | 2020-11-07T17:02:46.000Z | 2020-11-07T17:02:46.000Z | import ctypes
# ODBC configuration request codes for SQLConfigDataSource (see odbcinst.h).
ODBC_ADD_DSN = 1 # Add data source
ODBC_CONFIG_DSN = 2 # Configure (edit) data source
ODBC_REMOVE_DSN = 3 # Remove data source
ODBC_ADD_SYS_DSN = 4 # add a system DSN
ODBC_CONFIG_SYS_DSN = 5 # Configure a system DSN
ODBC_REMOVE_SYS_DSN = 6 # remove a system DSN
def create_sys_dsn(driver, **kw):
    """Create a system DSN

    Parameters:
        driver - ODBC driver name
        kw - Driver attributes (e.g. SERVER, DSN, Database, ...)

    Returns:
        0 - DSN not created
        1 - DSN created
    """
    # SQLConfigDataSource expects the attribute list as "KEY=VALUE" pairs
    # separated by NUL characters.
    nul = chr(0)
    attributes = nul.join(
        "%s=%s" % (attr, value) for attr, value in kw.items())
    return ctypes.windll.ODBCCP32.SQLConfigDataSource(
        0, ODBC_ADD_SYS_DSN, driver, attributes)
if __name__ == "__main__":
    # Demo: create a system DSN pointing at the local SQL Server Northwind DB.
    if create_sys_dsn("SQL Server",SERVER="(local)", DESCRIPTION="Northwind SQL Server DSN", DSN="NorthwindDSN", Database="Northwind", Trusted_Connection="Yes"):
        print("DSN created")
    else:
        print("DSN not created")
| 32.516129 | 161 | 0.654762 |
7dd2b64c2193625d7e77a285f12740b15be3f26e | 8,371 | py | Python | frappe/desk/form/document_follow.py | naderelabed/frappe | 4d6fefaf6b5af594180c0f9f31c7e28e6f514348 | [
"MIT"
] | null | null | null | frappe/desk/form/document_follow.py | naderelabed/frappe | 4d6fefaf6b5af594180c0f9f31c7e28e6f514348 | [
"MIT"
] | null | null | null | frappe/desk/form/document_follow.py | naderelabed/frappe | 4d6fefaf6b5af594180c0f9f31c7e28e6f514348 | [
"MIT"
] | null | null | null | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
import frappe.utils
from frappe.utils import get_url_to_form
from frappe.model import log_types
from frappe import _
from frappe.query_builder import DocType
@frappe.whitelist()
def update_follow(doctype, doc_name, following):
	"""Toggle the session user's follow state for the given document."""
	action = follow_document if following else unfollow_document
	return action(doctype, doc_name, frappe.session.user)
@frappe.whitelist()
def follow_document(doctype, doc_name, user):
	"""Start following *doc_name* of *doctype* on behalf of *user*.

	Returns the newly created "Document Follow" doc, or None when following
	does not apply: excluded/log doctypes, doctypes without track_changes,
	the Administrator user, notifications disabled, or already following.
	"""
	excluded_doctypes = ("Communication", "ToDo", "Email Unsubscribe", "File",
		"Comment", "Email Account", "Email Domain")
	if doctype in excluded_doctypes or doctype in log_types:
		return
	if not frappe.get_meta(doctype).track_changes or user == "Administrator":
		return
	if not frappe.db.get_value("User", user, "document_follow_notify",
			ignore=True, cache=True):
		return
	if is_document_followed(doctype, doc_name, user):
		return
	follow = frappe.new_doc("Document Follow")
	follow.update({
		"ref_doctype": doctype,
		"ref_docname": doc_name,
		"user": user
	})
	follow.save()
	return follow
@frappe.whitelist()
def unfollow_document(doctype, doc_name, user):
	"""Stop following the document; return 1 if a record was removed, else 0."""
	matches = frappe.get_all(
		"Document Follow",
		filters={
			"ref_doctype": doctype,
			"ref_docname": doc_name,
			"user": user
		},
		fields=["name"],
		limit=1
	)
	if not matches:
		return 0
	frappe.delete_doc("Document Follow", matches[0].name)
	return 1
def get_message(doc_name, doctype, frequency, user):
	"""Collect version changes and comments for a document, newest first."""
	entries = get_version(doctype, doc_name, frequency, user)
	entries += get_comments(doctype, doc_name, frequency, user)
	entries.sort(key=lambda entry: entry["time"], reverse=True)
	return entries
def send_email_alert(receiver, docinfo, timeline):
	"""Send the document-follow digest email; no-op when *receiver* is falsy."""
	if not receiver:
		return
	frappe.sendmail(
		subject=_("Document Follow Notification"),
		recipients=[receiver],
		template="document_follow",
		args={
			"docinfo": docinfo,
			"timeline": timeline,
		}
	)
def send_document_follow_mails(frequency):
	'''
	Send the follow-digest email to every user subscribed at *frequency*.

	param:
		frequency: "Hourly", "Daily" or "Weekly"
	task:
		set receivers according to frequency
		group followed documents per user
		collect changes, activity and comments per document
		send one digest mail per user
	'''
	user_list = get_user_list(frequency)
	for user in user_list:
		message, valid_document_follows = get_message_for_user(frequency, user)
		if message:
			send_email_alert(user, valid_document_follows, message)
			# send an email if we have already spent resources creating the message
			# nosemgrep
			frappe.db.commit()
def get_user_list(frequency):
	"""Return users with follow-notify enabled at the given frequency.

	Joins Document Follow to User and deduplicates via GROUP BY, so only
	users who actually follow at least one document are returned.
	"""
	DocumentFollow = DocType('Document Follow')
	User = DocType('User')
	return (frappe.qb.from_(DocumentFollow).join(User)
		.on(DocumentFollow.user == User.name)
		.where(User.document_follow_notify == 1)
		.where(User.document_follow_frequency == frequency)
		.select(DocumentFollow.user)
		.groupby(DocumentFollow.user)).run(pluck="user")
def get_message_for_user(frequency, user):
	"""Build the digest for one user.

	Returns a tuple ``(message, valid_document_follows)``: the flattened
	timeline entries across all followed documents with activity, and the
	doc references (with form URLs) for those documents.
	"""
	message = []
	latest_document_follows = get_document_followed_by_user(user)
	valid_document_follows = []
	for document_follow in latest_document_follows:
		content = get_message(document_follow.ref_docname, document_follow.ref_doctype, frequency, user)
		if content:
			message = message + content
			valid_document_follows.append({
				"reference_docname": document_follow.ref_docname,
				"reference_doctype": document_follow.ref_doctype,
				"reference_url": get_url_to_form(document_follow.ref_doctype, document_follow.ref_docname)
			})
	return message, valid_document_follows
def get_document_followed_by_user(user):
	"""Return (ref_doctype, ref_docname) dicts the user follows, oldest-modified first."""
	DocumentFollow = DocType('Document Follow')
	# at max 20 documents are sent for each user
	return (frappe.qb.from_(DocumentFollow)
		.where(DocumentFollow.user == user)
		.select(DocumentFollow.ref_doctype, DocumentFollow.ref_docname)
		.orderby(DocumentFollow.modified)
		.limit(20)).run(as_dict=True)
def get_version(doctype, doc_name, frequency, user):
	"""Collect timeline entries from Version documents for *doc_name*.

	Looks at Versions modified within the window implied by *frequency*
	(excluding the user's own edits) and expands field changes, row changes
	and added rows into timeline entry dicts. Returns a (possibly empty) list.
	"""
	timeline = []
	filters = get_filters("docname", doc_name, frequency, user)
	# NOTE: "modified" appears twice in fields; harmless but redundant.
	version = frappe.get_all("Version",
		filters=filters,
		fields=["ref_doctype", "data", "modified", "modified", "modified_by"]
	)
	for v in version:
		change = frappe.parse_json(v.data)
		time = frappe.utils.format_datetime(v.modified, "hh:mm a")
		timeline_items = []
		# BUG FIX: the original overwrote timeline_items in each branch, so a
		# version containing e.g. both field changes and added rows only
		# reported the last category. Accumulate all three instead.
		if change.changed:
			timeline_items += get_field_changed(change.changed, time, doctype, doc_name, v)
		if change.row_changed:
			timeline_items += get_row_changed(change.row_changed, time, doctype, doc_name, v)
		if change.added:
			timeline_items += get_added_row(change.added, time, doctype, doc_name, v)
		timeline += timeline_items
	return timeline
def get_comments(doctype, doc_name, frequency, user):
	"""Collect Comment/Like entries on *doc_name* within the frequency window."""
	from html2text import html2text
	timeline = []
	filters = get_filters("reference_name", doc_name, frequency, user)
	comments = frappe.get_all("Comment",
		filters=filters,
		fields=["content", "modified", "modified_by", "comment_type"]
	)
	for comment in comments:
		# NOTE(review): "<b>{0}<b>" below never closes the bold tag -- the
		# second tag should presumably be "</b>"; confirm before changing
		# the email template output.
		if comment.comment_type == "Like":
			by = ''' By : <b>{0}<b>'''.format(comment.modified_by)
		elif comment.comment_type == "Comment":
			by = '''Commented by : <b>{0}<b>'''.format(comment.modified_by)
		else:
			by = ''
		time = frappe.utils.format_datetime(comment.modified, "hh:mm a")
		timeline.append({
			"time": comment.modified,
			"data": {
				"time": time,
				"comment": html2text(str(comment.content)),
				"by": by
			},
			"doctype": doctype,
			"doc_name": doc_name,
			"type": "comment"
		})
	return timeline
def is_document_followed(doctype, doc_name, user):
	"""Return a truthy value when *user* already follows the document."""
	criteria = {
		"ref_doctype": doctype,
		"ref_docname": doc_name,
		"user": user
	}
	return frappe.db.exists("Document Follow", criteria)
@frappe.whitelist()
def get_follow_users(doctype, doc_name):
	"""List the users following the given document."""
	criteria = {
		"ref_doctype": doctype,
		"ref_docname": doc_name
	}
	return frappe.get_all("Document Follow", filters=criteria, fields=["user"])
def get_row_changed(row_changed, time, doctype, doc_name, v):
	"""Build timeline entries for child-table row changes in a Version.

	Each element d of row_changed is
	[table_field, row_idx, row_name, [[field, old, new], ...]].
	Note: falsy values are replaced with ' ' IN PLACE on the input lists.
	"""
	from html2text import html2text
	items = []
	for d in row_changed:
		d[2] = d[2] if d[2] else ' '
		d[0] = d[0] if d[0] else ' '
		d[3][0][1] = d[3][0][1] if d[3][0][1] else ' '
		items.append({
			"time": v.modified,
			"data": {
				"time": time,
				"table_field": d[0],
				"row": str(d[1]),
				"field": d[3][0][0],
				"from": html2text(str(d[3][0][1])),
				"to": html2text(str(d[3][0][2]))
			},
			"doctype": doctype,
			"doc_name": doc_name,
			"type": "row changed",
			"by": v.modified_by
		})
	return items
def get_added_row(added, time, doctype, doc_name, v):
	"""Build timeline entries for child-table rows added in a Version."""
	return [
		{
			"time": v.modified,
			"data": {
				"to": entry[0],
				"time": time
			},
			"doctype": doctype,
			"doc_name": doc_name,
			"type": "row added",
			"by": v.modified_by
		}
		for entry in added
	]
def get_field_changed(changed, time, doctype, doc_name, v):
	"""Build timeline entries for top-level field changes in a Version.

	Each element d of changed is [field, old_value, new_value].
	Note: falsy values are replaced with ' ' IN PLACE on the input lists.
	"""
	from html2text import html2text
	items = []
	for d in changed:
		d[1] = d[1] if d[1] else ' '
		d[2] = d[2] if d[2] else ' '
		d[0] = d[0] if d[0] else ' '
		items.append({
			"time": v.modified,
			"data": {
				"time": time,
				"field": d[0],
				"from": html2text(str(d[1])),
				"to": html2text(str(d[2]))
			},
			"doctype": doctype,
			"doc_name": doc_name,
			"type": "field changed",
			"by": v.modified_by
		})
	return items
# Frequency-specific entry points -- presumably wired up as scheduler jobs;
# confirm against hooks.py scheduler_events.
def send_hourly_updates():
	send_document_follow_mails("Hourly")
def send_daily_updates():
	send_document_follow_mails("Daily")
def send_weekly_updates():
	send_document_follow_mails("Weekly")
def get_filters(search_by, name, frequency, user):
	"""Build Frappe list filters selecting records named *name* that were
	modified within the last hour/day/week by someone other than *user*.

	:param search_by: field to match *name* against (e.g. "docname")
	:param frequency: "Hourly", "Daily" or "Weekly"
	:returns: list-of-lists filter spec; empty list for unknown frequency
	"""
	# Compute the [lower, upper) modification window once instead of
	# duplicating the full filter list per frequency.
	if frequency == "Weekly":
		lower = frappe.utils.add_days(frappe.utils.nowdate(), -7)
		upper = frappe.utils.nowdate()
	elif frequency == "Daily":
		lower = frappe.utils.add_days(frappe.utils.nowdate(), -1)
		upper = frappe.utils.nowdate()
	elif frequency == "Hourly":
		lower = frappe.utils.add_to_date(frappe.utils.now_datetime(), hours=-1)
		upper = frappe.utils.now_datetime()
	else:
		return []
	return [
		[search_by, "=", name],
		["modified", ">", lower],
		["modified", "<", upper],
		["modified_by", "!=", user]
	]
| 26.490506 | 115 | 0.687254 |
bb8f9b466484e7d22770dafee69a3a588ee9248c | 2,343 | py | Python | setup.py | Bduz/safelife | cab43b41a5eb73e3f0d27d0435b6e30973b770b2 | [
"Apache-2.0"
] | null | null | null | setup.py | Bduz/safelife | cab43b41a5eb73e3f0d27d0435b6e30973b770b2 | [
"Apache-2.0"
] | null | null | null | setup.py | Bduz/safelife | cab43b41a5eb73e3f0d27d0435b6e30973b770b2 | [
"Apache-2.0"
] | null | null | null | import os
import glob
import setuptools
import platform
class get_numpy_include(object):
    """Lazy include-path provider.

    Defers importing numpy until the path is actually stringified at build
    time, so setup.py can be parsed before build dependencies are installed.
    """
    def __str__(self):
        import numpy
        return numpy.get_include()
# Package layout: compiled C speedups live in safelife/speedups_src and the
# bundled level/asset data in safelife/levels.
base_dir = os.path.abspath(os.path.dirname(__file__))
ext_path = os.path.join(base_dir, 'safelife', 'speedups_src')
levels_path = os.path.join(base_dir, 'safelife', 'levels')
# NOTE(review): the globs below return absolute paths while package_data
# conventionally expects package-relative paths -- verify on a clean build.
data_files = ['*.png']
data_files += glob.glob(os.path.join(levels_path, '**', '*.npz'), recursive=True)
data_files += glob.glob(os.path.join(levels_path, '**', '*.yaml'), recursive=True)
with open(os.path.join(base_dir, "README.md"), "rt", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
    name='safelife',
    version='1.2.1',
    author="Carroll L. Wainwright",
    author_email="carroll@partnershiponai.org",
    description="Safety benchmarks for reinforcement learning",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/PartnershipOnAI/safelife",
    packages=['safelife'],
    package_data={'safelife': data_files},
    install_requires=[
        "pyemd==0.5.1",
        "numpy>=1.18.0",
        "scipy>=1.0.0",
        "gym>=0.12.5",
        "imageio>=2.5.0",
        "pyglet>=1.3.2,<=1.5.0",
        "pyyaml>=3.12",
    ],
    ext_modules=[
        setuptools.Extension(
            'safelife.speedups',
            py_limited_api=True,
            define_macros=[
                ('PY_ARRAY_UNIQUE_SYMBOL', 'safelife_speedups'),
                ('NPY_NO_DEPRECATED_API', 'NPY_1_11_API_VERSION'),
                ('Py_LIMITED_API', '0x03060000'),
            ],
            # get_numpy_include() defers the numpy import until build time.
            include_dirs=[ext_path, get_numpy_include()],
            sources=glob.glob(os.path.join(ext_path, '*.c')),
            # Compiler warnings flags are clang/gcc specific; skip on MSVC.
            extra_compile_args=[
                '-O3',
                '-Wno-shorten-64-to-32',
                '-Wno-c++11-extensions',
                '-Wvla',
            ] if platform.system() != 'Windows' else []
        ),
    ],
    entry_points={
        'console_scripts': [
            'safelife = safelife.__main__:run',
        ],
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
)
| 31.24 | 82 | 0.588988 |
5d3e361ac660a5c9175721fc4a8b66d5c81a6b64 | 23,743 | py | Python | tests/test_core/test_models.py | dinojugosloven/pymalcolm | 0b856ee1113efdb42f2f3b15986f8ac5f9e1b35a | [
"Apache-2.0"
] | null | null | null | tests/test_core/test_models.py | dinojugosloven/pymalcolm | 0b856ee1113efdb42f2f3b15986f8ac5f9e1b35a | [
"Apache-2.0"
] | null | null | null | tests/test_core/test_models.py | dinojugosloven/pymalcolm | 0b856ee1113efdb42f2f3b15986f8ac5f9e1b35a | [
"Apache-2.0"
] | null | null | null | from collections import OrderedDict
import unittest
import numpy as np
from mock import Mock
from annotypes import Serializable
from malcolm.core import BlockModel, StringMeta, Alarm, \
AlarmSeverity, AlarmStatus, TimeStamp, VMeta, TableMeta, StringArrayMeta, \
NumberMeta, NumberArrayMeta, MethodModel, ChoiceMeta, ChoiceArrayMeta, \
BooleanMeta, BooleanArrayMeta
from malcolm.core.models import NTTable, MapMeta, BlockMeta, NTScalar, \
Meta, MethodMeta, MethodLog
from malcolm.core.notifier import DummyNotifier
class TestAttribute(unittest.TestCase):
    """Basic behaviour of an NTScalar attribute model built from StringMeta."""
    def setUp(self):
        self.meta = StringMeta()
        self.o = self.meta.create_attribute_model()
    def test_init(self):
        self.assertIs(self.o.meta, self.meta)
        assert self.o.value == ""
        assert self.o.typeid == "epics:nt/NTScalar:1.0"
    def test_set_value(self):
        value = "test_value"
        self.o.set_value(value)
        assert self.o.value == value
    def test_set_alarm(self):
        alarm = Alarm(
            AlarmSeverity.MAJOR_ALARM, AlarmStatus.DEVICE_STATUS, "bad")
        self.o.set_alarm(alarm)
        assert self.o.alarm == alarm
    def test_set_timeStamp(self):
        timeStamp = TimeStamp()
        self.o.set_timeStamp(timeStamp)
        assert self.o.timeStamp == timeStamp
class TestNTScalar(unittest.TestCase):
    """Serialization round-trip tests for NTScalar."""
    def setUp(self):
        self.serialized = OrderedDict()
        self.serialized["typeid"] = "epics:nt/NTScalar:1.0"
        self.serialized["value"] = "some string"
        self.serialized["alarm"] = Alarm().to_dict()
        self.serialized["timeStamp"] = TimeStamp().to_dict()
        self.serialized["meta"] = StringMeta("desc").to_dict()
    def test_to_dict(self):
        a = StringMeta("desc").create_attribute_model()
        a.set_value("some string")
        a.set_timeStamp(self.serialized["timeStamp"])
        assert a.to_dict() == self.serialized
    def test_from_dict(self):
        a = NTScalar.from_dict(self.serialized)
        assert a.meta.to_dict() == StringMeta("desc").to_dict()
        assert a.value == "some string"
class TestBlockMeta(unittest.TestCase):
    """Serialization round-trip tests for BlockMeta."""
    def setUp(self):
        self.serialized = OrderedDict()
        self.serialized["typeid"] = "malcolm:core/BlockMeta:1.0"
        self.serialized["description"] = "desc"
        self.serialized["tags"] = []
        self.serialized["writeable"] = True
        self.serialized["label"] = ""
        self.serialized["fields"] = []
    def test_to_dict(self):
        m = BlockMeta("desc")
        assert m.to_dict() == self.serialized
    def test_from_dict(self):
        m = BlockMeta.from_dict(self.serialized)
        assert m.description == "desc"
        assert m.tags == []
        assert m.writeable is True
        assert m.label == ""
class TestBlockModel(unittest.TestCase):
    """Endpoint management on BlockModel (add/remove keeps meta.fields in sync)."""
    def setUp(self):
        self.attr = StringMeta().create_attribute_model()
        self.method = MethodModel()
        self.o = BlockModel()
        self.o.set_endpoint_data("attr", self.attr)
        self.o.set_endpoint_data("method", self.method)
    def test_init(self):
        assert self.o.method == self.method
        assert self.o.attr == self.attr
        assert self.o.typeid == "malcolm:core/Block:1.0"
        assert list(self.o) == ["meta", "attr", "method"]
    def test_remove_endpoint(self):
        self.o.remove_endpoint("attr")
        assert self.o.method == self.method
        assert list(self.o) == ["meta", "method"]
        assert self.o.meta.fields == ["method"]
        # removed endpoint must no longer be reachable as an attribute
        with self.assertRaises(AttributeError):
            a = self.o.attr
        # re-adding appends at the end rather than restoring original order
        self.o.set_endpoint_data("attr", self.attr)
        assert list(self.o) == ["meta", "method", "attr"]
        assert self.o.meta.fields == ["method", "attr"]
        assert self.o.attr == self.attr
class TestBooleanArrayMeta(unittest.TestCase):
    """Validation/coercion behaviour of BooleanArrayMeta."""
    def setUp(self):
        self.meta = BooleanArrayMeta("test description")
    def test_init(self):
        assert "test description" == self.meta.description
        assert self.meta.label == ""
        assert self.meta.typeid == "malcolm:core/BooleanArrayMeta:1.0"
    def test_validate_none(self):
        assert list(self.meta.validate(None)) == []
    def test_validate_array(self):
        array = ["True", "", True, False, 1, 0]
        assert (
            [True, False, True, False, True, False]) == (
            list(self.meta.validate(array)))
    def test_not_iterable(self):
        value = True
        assert self.meta.validate(value) == [True]
    def test_null_element_raises(self):
        # NOTE(review): despite the name, this asserts that a None element
        # coerces to False rather than raising -- consider renaming.
        array = ["test", None]
        assert (
            [True, False]) == list(self.meta.validate(array))
class TestBooleanMeta(unittest.TestCase):
    """Truthiness coercion behaviour of BooleanMeta.validate."""
    def setUp(self):
        self.boolean_meta = BooleanMeta("test description")
    def test_given_value_str_then_cast_and_return(self):
        response = self.boolean_meta.validate("TestValue")
        assert response
        response = self.boolean_meta.validate("")
        assert not response
    def test_given_value_int_then_cast_and_return(self):
        response = self.boolean_meta.validate(15)
        assert response
        response = self.boolean_meta.validate(0)
        assert not response
    def test_given_value_boolean_then_cast_and_return(self):
        response = self.boolean_meta.validate(True)
        assert response
        response = self.boolean_meta.validate(False)
        assert not response
    def test_given_value_None_then_return(self):
        response = self.boolean_meta.validate(None)
        # NOTE(review): "assert response is False" would be more idiomatic.
        assert False == response
class TestChoiceArrayMeta(unittest.TestCase):
    """Validation behaviour of ChoiceArrayMeta against the choices ["a", "b"]."""
    def setUp(self):
        self.meta = ChoiceArrayMeta("test description", ["a", "b"])
    def test_init(self):
        self.meta = ChoiceArrayMeta("test description", ["a", "b"])
        assert "test description" == self.meta.description
        assert self.meta.label == ""
        assert self.meta.typeid == "malcolm:core/ChoiceArrayMeta:1.0"
        assert self.meta.choices == ["a", "b"]
    def test_validate_none(self):
        assert self.meta.validate(None) == []
    def test_validate(self):
        response = self.meta.validate(["b", "a"])
        assert ["b", "a"] == response
    def test_not_iterable_raises(self):
        value = "abb"
        with self.assertRaises(ValueError):
            self.meta.validate(value)
    def test_null_element_maps_default(self):
        # a None element is replaced by the first choice ("a")
        array = ["b", None]
        assert self.meta.validate(array) == ["b", "a"]
    def test_invalid_choice_raises(self):
        with self.assertRaises(ValueError):
            self.meta.validate(["a", "x"])
class TestChoiceMeta(unittest.TestCase):
    """Validation and serialization behaviour of ChoiceMeta."""
    def setUp(self):
        self.choice_meta = ChoiceMeta(
            "test description", ["a", "b"])
        self.serialized = OrderedDict()
        self.serialized["typeid"] = "malcolm:core/ChoiceMeta:1.0"
        self.serialized["description"] = "desc"
        self.serialized["choices"] = ["a", "b"]
        self.serialized["tags"] = []
        self.serialized["writeable"] = False
        self.serialized["label"] = "name"
    def test_init(self):
        self.choice_meta = ChoiceMeta(
            "test description", ["a", "b"])
        assert (
            "test description") == self.choice_meta.description
        assert (
            self.choice_meta.typeid) == "malcolm:core/ChoiceMeta:1.0"
        assert (
            self.choice_meta.label) == ""
        assert (
            self.choice_meta.choices) == ["a", "b"]
    def test_given_valid_value_then_return(self):
        response = self.choice_meta.validate("a")
        assert "a" == response
    def test_int_validate(self):
        # integers index into the choices list
        response = self.choice_meta.validate(1)
        assert "b" == response
    def test_None_valid(self):
        # None maps to the first choice
        response = self.choice_meta.validate(None)
        assert "a" == response
    def test_given_invalid_value_then_raises(self):
        with self.assertRaises(ValueError):
            self.choice_meta.validate('badname')
    def test_set_choices(self):
        self.choice_meta.set_choices(["4"])
        assert ["4"] == self.choice_meta.choices
    def test_to_dict(self):
        bm = ChoiceMeta("desc", ["a", "b"], label="name")
        assert bm.to_dict() == self.serialized
    def test_from_dict(self):
        bm = ChoiceMeta.from_dict(self.serialized)
        assert type(bm) == ChoiceMeta
        assert bm.description == "desc"
        assert bm.choices == ["a", "b"]
        assert bm.tags == []
        assert not bm.writeable
        assert bm.label == "name"
class TestMethodMeta(unittest.TestCase):
    """Tests for MethodMeta: label setter, takes/defaults, serialization."""

    def test_init(self):
        m = MethodMeta(description="test_description")
        assert "test_description" == m.description
        assert "malcolm:core/MethodMeta:1.1" == m.typeid
        assert "" == m.label

    def test_set_label(self):
        m = MethodMeta(description="test_description")
        m.set_label("new_label")
        assert "new_label" == m.label

    # NOTE: setUp is declared mid-class; unittest still runs it before
    # every test method regardless of source order.
    def setUp(self):
        self.serialized = OrderedDict()
        self.serialized["typeid"] = "malcolm:core/MethodMeta:1.1"
        # A MapMeta with one string element used as the method's "takes".
        self.takes = MapMeta()
        self.takes.set_elements({"in_attr": StringMeta("desc")})
        self.serialized["takes"] = self.takes.to_dict()
        self.serialized["defaults"] = OrderedDict({"in_attr": "default"})
        self.serialized["description"] = "test_description"
        self.serialized["tags"] = []
        self.serialized["writeable"] = False
        self.serialized["label"] = ""
        self.serialized["returns"] = MapMeta().to_dict()

    def test_to_dict(self):
        m = MethodMeta(description="test_description")
        m.set_takes(self.takes)
        m.set_defaults(self.serialized["defaults"])
        assert m.to_dict() == self.serialized

    def test_from_dict(self):
        m = MethodMeta.from_dict(self.serialized)
        assert m.takes.to_dict() == self.takes.to_dict()
        assert m.defaults == self.serialized["defaults"]
        assert m.tags == []
        assert m.writeable is False
        assert m.label == ""
        assert m.returns.to_dict() == MapMeta().to_dict()
class TestMethodLog(unittest.TestCase):
    """Tests for MethodLog serialization round-trips."""

    def setUp(self):
        self.serialized = OrderedDict()
        self.serialized["typeid"] = "malcolm:core/MethodLog:1.0"
        self.serialized["value"] = dict(a=1)
        self.serialized["present"] = ["a"]
        self.serialized["alarm"] = Alarm.ok.to_dict()
        self.serialized["timeStamp"] = TimeStamp.zero.to_dict()

    def test_to_dict(self):
        # alarm is not passed here, yet the serialized form contains
        # Alarm.ok — i.e. the constructor defaults the alarm to ok.
        m = MethodLog(value=dict(a=1), present=["a"], timeStamp=TimeStamp.zero)
        assert m.to_dict() == self.serialized

    def test_from_dict(self):
        m = MethodLog.from_dict(self.serialized)
        assert m.value == dict(a=1)
        assert m.present == ["a"]
        assert m.alarm.to_dict() == Alarm.ok.to_dict()
        assert m.timeStamp.to_dict() == TimeStamp.zero.to_dict()
class TestMapMeta(unittest.TestCase):
    """Tests for MapMeta elements/required handling and serialization."""

    def test_values_set(self):
        self.assertIsInstance(self.mm.elements, dict)
        assert len(self.mm.elements) == 0
        assert self.mm.typeid == "malcolm:core/MapMeta:1.0"

    def test_set_elements(self):
        els = dict(sam=StringArrayMeta())
        self.mm.set_elements(els)
        assert self.mm.elements == els

    def test_set_required(self):
        # Reuses test_set_elements directly to populate elements first.
        self.test_set_elements()
        req = ("sam",)
        self.mm.set_required(req)
        assert self.mm.required == req

    # setUp is declared mid-class; unittest still runs it before each test.
    def setUp(self):
        self.mm = MapMeta()
        self.sam = StringArrayMeta()
        self.sam.label = "C1"
        self.serialized = OrderedDict()
        self.serialized["typeid"] = "malcolm:core/MapMeta:1.0"
        self.serialized["elements"] = dict(c1=self.sam.to_dict())
        self.serialized["required"] = ["c1"]

    def test_to_dict(self):
        tm = MapMeta()
        tm.set_elements(dict(c1=self.sam))
        tm.set_required(["c1"])
        assert tm.to_dict() == self.serialized

    def test_from_dict(self):
        tm = MapMeta.from_dict(self.serialized)
        assert len(tm.elements) == 1
        expected = self.sam.to_dict()
        assert tm.elements["c1"].to_dict() == expected
class TestMeta(unittest.TestCase):
    """Tests for base Meta setters and the change notifications they emit."""

    def setUp(self):
        self.o = Meta("desc")
        # Mock the notifier's add_squashed_change so each setter's
        # notification call can be asserted without a real pipeline.
        notifier = DummyNotifier()
        notifier.add_squashed_change = Mock()
        self.o.set_notifier_path(notifier, ["path"])
        self.serialized = OrderedDict()
        self.serialized["typeid"] = "filled_in_by_subclass"
        self.serialized["description"] = "desc"
        self.serialized["tags"] = []
        self.serialized["writeable"] = False
        self.serialized["label"] = ""

    def test_set_description(self):
        description = "desc2"
        # Setters return the value they stored.
        assert self.o.set_description(description) == description
        assert self.o.description == description
        self.o.notifier.add_squashed_change.assert_called_once_with(
            ["path", "description"], description)

    def test_set_tags(self):
        tags = ("widget:textinput",)
        assert self.o.set_tags(tags) == tags
        assert self.o.tags == tags
        self.o.notifier.add_squashed_change.assert_called_once_with(
            ["path", "tags"], tags)

    def test_set_writeable(self):
        writeable = True
        assert self.o.set_writeable(writeable) == writeable
        assert self.o.writeable == writeable
        self.o.notifier.add_squashed_change.assert_called_once_with(
            ["path", "writeable"], writeable)

    def test_set_label(self):
        label = "my label"
        assert self.o.set_label(label) == label
        assert self.o.label == label
        self.o.notifier.add_squashed_change.assert_called_once_with(
            ["path", "label"], label)

    def test_to_dict(self):
        m = Meta("desc")
        m.typeid = "filled_in_by_subclass"
        assert m.to_dict() == self.serialized
class TestNumberArrayMeta(unittest.TestCase):
    """Tests for NumberArrayMeta.validate across dtypes and input kinds."""

    def test_numpy_array(self):
        nm = NumberArrayMeta("float64")
        values = np.array([1.2, 3.4, 5.6])
        response = nm.validate(values)
        for i, value in enumerate(response):
            assert values[i] == value

    def test_numpy_array_wrong_type_raises(self):
        # A string payload is rejected outright.
        nm = NumberArrayMeta("float64")
        values = "[1.2, 3.4, 5.6]"
        with self.assertRaises(ValueError):
            nm.validate(values)

    def test_numpy_array_wrong_number_type_raises(self):
        # A float ndarray cannot be validated against an int dtype.
        nm = NumberArrayMeta("int32")
        values = np.array([1.2, 3.4, 5.6])
        with self.assertRaises(AssertionError):
            nm.validate(values)

    def test_float_against_float64(self):
        nm = NumberArrayMeta("float64")
        values = [1.2, 3.4, 5.6]
        response = nm.validate(values)
        for i, value in enumerate(response):
            assert values[i] == value

    def test_float_against_float32(self):
        # float32 loses precision, so compare to 5 places only.
        nm = NumberArrayMeta("float32")
        values = [1.2, 3.4, 5.6]
        response = nm.validate(values)
        for i, value in enumerate(response):
            self.assertAlmostEqual(values[i], response[i], places=5)

    def test_int_against_float(self):
        nm = NumberArrayMeta("float32")
        values = [1, 2, 3]
        response = nm.validate(values)
        for i, value in enumerate(response):
            assert values[i] == value
        nm = NumberArrayMeta("float64")
        values = [1, 2, 3]
        response = nm.validate(values)
        for i, value in enumerate(response):
            assert values[i] == value

    def test_int_against_int(self):
        nm = NumberArrayMeta("int32")
        values = [1, 2, 3]
        response = nm.validate(values)
        for i, value in enumerate(response):
            assert values[i] == value

    def test_float_against_int_floors(self):
        # Unlike the ndarray case above, a plain Python list of floats is
        # truncated to ints rather than rejected.
        nm = NumberArrayMeta("int32")
        actual = list(nm.validate([1.2, 34, 56]))
        expected = [1, 34, 56]
        assert actual == expected

    def test_null_element_zero(self):
        # Despite the test name, a None element maps to NaN for float64.
        nm = NumberArrayMeta("float64")
        actual = nm.validate([1.2, None, 1.3])
        assert actual[0] == 1.2
        assert np.isnan(actual[1])
        assert actual[2] == 1.3

    def test_none_validates(self):
        nm = NumberArrayMeta("int32")
        assert list(nm.validate(None)) == []
class TestNumberMeta(unittest.TestCase):
    """Tests for scalar NumberMeta validation and serialization."""

    def test_init(self):
        nm = NumberMeta("float32")
        assert nm.typeid == "malcolm:core/NumberMeta:1.0"
        assert nm.dtype == "float32"
        assert nm.label == ""

    def test_float_against_float32(self):
        # float32 loses precision, so compare to 5 places only.
        nm = NumberMeta("float32")
        self.assertAlmostEqual(123.456, nm.validate(123.456), places=5)

    def test_float_against_float64(self):
        nm = NumberMeta("float64")
        assert 123.456 == nm.validate(123.456)

    def test_int_against_float(self):
        nm = NumberMeta("float64")
        assert 123 == nm.validate(123)

    def test_int_against_int(self):
        nm = NumberMeta("int32")
        assert 123 == nm.validate(123)

    def test_float_to_int_truncates(self):
        # Scalar float -> int truncates rather than raising.
        nm = NumberMeta("int32")
        assert nm.validate(123.6) == 123

    def test_none_validates(self):
        # None maps to the dtype's zero value.
        nm = NumberMeta("int32")
        assert 0 == nm.validate(None)

    def test_unsigned_validates(self):
        # Strings are parsed; negative values wrap modulo 2**32 for uint32.
        nm = NumberMeta("uint32")
        assert nm.validate("22") == 22
        assert nm.validate(-22) == 2 ** 32 - 22

    # setUp is declared mid-class; unittest still runs it before each test.
    def setUp(self):
        self.serialized = OrderedDict()
        self.serialized["typeid"] = "malcolm:core/NumberMeta:1.0"
        self.serialized["dtype"] = "float64"
        self.serialized["description"] = "desc"
        self.serialized["tags"] = []
        self.serialized["writeable"] = False
        self.serialized["label"] = "name"
        # Default display sub-structure expected in the serialized form.
        display = OrderedDict()
        display["typeid"] = "display_t"
        display["limitLow"] = 0
        display["limitHigh"] = 0
        display["description"] = ""
        display["precision"] = 8
        display["units"] = ""
        self.serialized["display"] = display

    def test_to_dict(self):
        nm = NumberMeta("float64", "desc", label="name")
        assert nm.to_dict() == self.serialized

    def test_from_dict(self):
        nm = NumberMeta.from_dict(self.serialized)
        assert type(nm) == NumberMeta
        assert nm.description == "desc"
        assert nm.dtype == "float64"
        assert nm.tags == []
        assert not nm.writeable
        assert nm.label == "name"
class TestStringArrayMeta(unittest.TestCase):
    """Exercise StringArrayMeta: init defaults and validation failures."""

    def setUp(self):
        self.meta = StringArrayMeta("test description")

    def test_init(self):
        assert self.meta.description == "test description"
        assert self.meta.label == ""
        assert self.meta.typeid == "malcolm:core/StringArrayMeta:1.0"

    def test_validate_none(self):
        # None normalizes to an empty list.
        assert self.meta.validate(None) == []

    def test_validate_array(self):
        # Non-string elements are rejected rather than coerced.
        with self.assertRaises(AssertionError):
            self.meta.validate(["test_string", 123, 123.456])

    def test_not_iterable_raises(self):
        with self.assertRaises(AssertionError):
            self.meta.validate(12346)

    def test_null_element_raises(self):
        # A None element has no default to map to here.
        with self.assertRaises(AssertionError):
            self.meta.validate(["test", None])
class TestStringMeta(unittest.TestCase):
    """Exercise StringMeta.validate coercion of various value types."""

    def setUp(self):
        self.string_meta = StringMeta("test string description")

    def test_given_value_str_then_return(self):
        assert self.string_meta.validate("TestValue") == "TestValue"

    def test_given_value_int_then_cast_and_return(self):
        assert self.string_meta.validate(15) == "15"

    def test_given_value_float_then_cast_and_return(self):
        assert self.string_meta.validate(12.8) == "12.8"

    def test_given_value_None_then_return(self):
        # None maps to the empty string rather than the text "None".
        assert self.string_meta.validate(None) == ""
class TestTableMeta(unittest.TestCase):
    """Tests for TableMeta elements handling, serialization and validation."""

    def test_init(self):
        tm = TableMeta("desc")
        assert "desc" == tm.description
        assert "malcolm:core/TableMeta:1.0" == tm.typeid
        assert [] == tm.tags
        assert False == tm.writeable
        assert "" == tm.label

    def setUp(self):
        tm = TableMeta("desc")
        self.tm = tm
        # One string-array column "c1" for the validate tests below.
        self.tm.set_elements(dict(c1=StringArrayMeta()))
        self.sam = StringArrayMeta()
        self.serialized = OrderedDict()
        self.serialized["typeid"] = "malcolm:core/TableMeta:1.0"
        self.serialized["description"] = "desc"
        self.serialized["tags"] = []
        self.serialized["writeable"] = True
        self.serialized["label"] = "Name"
        self.serialized["elements"] = dict(c1=self.sam.to_dict())

    def test_set_elements(self):
        tm = self.tm
        elements = OrderedDict()
        elements["col1"] = StringArrayMeta()
        elements["col2"] = StringArrayMeta()
        tm.set_elements(elements)
        assert elements == tm.elements

    def test_set_elements_from_serialized(self):
        # Serialized (dict) elements are deserialized back into meta objects.
        tm = self.tm
        elements = OrderedDict()
        elements["col1"] = StringArrayMeta().to_dict()
        elements["col2"] = StringArrayMeta().to_dict()
        tm.set_elements(elements)
        assert isinstance(tm.elements["col1"], StringArrayMeta)
        assert tm.elements["col1"].to_dict() == elements["col1"]

    def test_to_dict(self):
        tm = TableMeta("desc")
        tm.set_label("Name")
        tm.set_writeable(True)
        tm.set_elements(dict(c1=self.sam))
        assert tm.to_dict() == self.serialized

    def test_from_dict(self):
        tm = TableMeta.from_dict(self.serialized)
        assert tm.description == "desc"
        assert len(tm.elements) == 1
        assert tm.elements["c1"].to_dict() == self.sam.to_dict()
        assert tm.tags == []
        assert tm.writeable == True
        assert tm.label == "Name"

    def test_validate_from_good_table(self):
        # A table built from the meta's own table_cls round-trips unchanged.
        tm = self.tm
        t = tm.table_cls(c1=["me", "me3"])
        t_serialized = t.to_dict()
        t = tm.validate(t)
        assert t.to_dict() == t_serialized

    def test_validate_from_serialized(self):
        tm = self.tm
        serialized = dict(
            typeid="anything",
            c1=("me", "me3")
        )
        t = tm.validate(serialized)
        assert list(t) == ["c1"]
        assert t.c1 == serialized["c1"]
class TestVMeta(unittest.TestCase):
    """Tests for the VMeta base: validate is abstract, dict round-trips work."""

    def test_values_after_init(self):
        assert "test description" == self.meta.description
        assert not self.meta.writeable

    def test_given_validate_called_then_raise_error(self):
        # validate() must be overridden by subclasses.
        with self.assertRaises(NotImplementedError):
            self.meta.validate(1)

    # setUp is declared mid-class; unittest still runs it before each test.
    def setUp(self):
        self.meta = VMeta("test description")
        self.serialized = OrderedDict()
        self.serialized["typeid"] = "filled_in_by_subclass"
        self.serialized["description"] = "desc"
        self.serialized["tags"] = []
        self.serialized["writeable"] = True
        self.serialized["label"] = "my label"

    def test_to_dict(self):
        m = VMeta("desc", writeable=True, label="my label")
        m.typeid = "filled_in_by_subclass"
        assert m.to_dict() == self.serialized

    def test_from_dict(self):
        # Register a throwaway subclass so the placeholder typeid can be
        # resolved during deserialization.
        @Serializable.register_subclass("filled_in_by_subclass")
        class MyVMeta(VMeta):
            pass
        m = MyVMeta.from_dict(self.serialized)
        assert m.description == "desc"
        assert m.tags == []
        assert m.writeable is True
        assert m.label == "my label"
| 32.391542 | 79 | 0.616476 |
dce325f559cbd1c821085bb4a7854c77a1d271e0 | 1,070 | py | Python | setup.py | krakozaure/pyzzy | 17a316c0ced8095b671186c73fb5bf1daa2c140b | [
"MIT"
] | null | null | null | setup.py | krakozaure/pyzzy | 17a316c0ced8095b671186c73fb5bf1daa2c140b | [
"MIT"
] | null | null | null | setup.py | krakozaure/pyzzy | 17a316c0ced8095b671186c73fb5bf1daa2c140b | [
"MIT"
] | null | null | null | import setuptools
# Read the long description from the README. The encoding is given
# explicitly so installation does not depend on the platform's locale
# (fixes a potential UnicodeDecodeError on non-UTF-8 default encodings).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="pyzzy",
    version="0.0.11",
    author="krakozaure",
    license="MIT License",
    author_email="",
    description="Set of packages to simplify development in Python",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/krakozaure/pyzzy",
    packages=setuptools.find_packages(exclude=["tests"]),
    install_requires=["colorama", "ruamel.yaml", "toml"],
    tests_require=["pytest", "tox"],
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: Implementation :: CPython",
    ],
)
| 34.516129 | 70 | 0.63271 |
6ad28d6862d99a54dc5a880e2a3de9e6744d4d08 | 4,009 | py | Python | src/design_storage/jwt.py | DD-DeCaF/design-storage | 0c0e07f0dc505eb4a1e4521a87f5f7ac6f879b6d | [
"Apache-2.0"
] | null | null | null | src/design_storage/jwt.py | DD-DeCaF/design-storage | 0c0e07f0dc505eb4a1e4521a87f5f7ac6f879b6d | [
"Apache-2.0"
] | 4 | 2019-01-08T13:43:13.000Z | 2020-04-30T01:43:03.000Z | src/design_storage/jwt.py | DD-DeCaF/design-storage | 0c0e07f0dc505eb4a1e4521a87f5f7ac6f879b6d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018, Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handling and verification of JWT claims."""
import logging
from functools import wraps
from flask import abort, g, request
from jose import jwt
logger = logging.getLogger(__name__)
def init_app(app):
    """Add the jwt decoding middleware to the app.

    Registers a before_request hook that populates, for every request:
    ``g.jwt_valid`` (bool) and ``g.jwt_claims`` (dict with at least a
    ``"prj"`` mapping of project id -> access level).
    """
    @app.before_request
    def decode_jwt():
        # No header at all: treat as anonymous (valid=False, empty claims).
        if "Authorization" not in request.headers:
            logger.debug("No JWT provided")
            g.jwt_valid = False
            g.jwt_claims = {"prj": {}}
            return
        auth = request.headers["Authorization"]
        # Wrong scheme (not "Bearer ..."): also treated as anonymous
        # rather than rejected outright.
        if not auth.startswith("Bearer "):
            g.jwt_valid = False
            g.jwt_claims = {"prj": {}}
            return
        try:
            _, token = auth.split(" ", 1)
            g.jwt_claims = jwt.decode(
                token,
                app.config["JWT_PUBLIC_KEY"],
                app.config["JWT_PUBLIC_KEY"]["alg"],
            )
            # JSON object names can only be strings. Map project ids to ints for
            # easier handling
            g.jwt_claims["prj"] = {
                int(key): value for key, value in g.jwt_claims["prj"].items()
            }
            g.jwt_valid = True
            logger.debug(f"JWT claims accepted: {g.jwt_claims}")
        except (
            jwt.JWTError,
            jwt.ExpiredSignatureError,
            jwt.JWTClaimsError,
        ) as e:
            # A malformed/expired token is a hard 401, unlike a missing one.
            abort(401, f"JWT authentication failed: {e}")
def jwt_required(function):
    """Decorator: reject the request with 401 unless a valid JWT was decoded.

    Relies on the decode_jwt middleware having set ``g.jwt_valid`` for the
    current request before the wrapped view runs.
    """

    @wraps(function)
    def guarded(*args, **kwargs):
        if g.jwt_valid:
            return function(*args, **kwargs)
        abort(401, "JWT authentication required")

    return guarded
def jwt_require_claim(project_id, required_level):
    """
    Require a JWT claim for the given project and access level.

    Verify that the current user has access to the given project id, and that
    their access level is equal to or higher than the given required level.
    Aborts the request (403) if the user does not have sufficient access.

    :param project_id: The project ID to verify access for (None means the
        public project, which is read-only for everybody)
    :param required_level: The required access level (admin, write or read)
    :raises ValueError: if required_level is not one of the known levels
        (a programmer error, not a client error)
    :return: None
    """
    ACCESS_LEVELS = {
        "admin": 3,
        "write": 2,
        "read": 1,
    }
    # Membership test on the dict directly; `.keys()` was redundant.
    if required_level not in ACCESS_LEVELS:
        raise ValueError(f"Invalid claim level '{required_level}'")
    logger.debug(
        f"Looking for '{required_level}' access to project "
        f"'{project_id}' in claims '{g.jwt_claims}'"
    )
    # Nobody can write to public projects
    if project_id is None and required_level != "read":
        abort(403, "Public data can not be modified")
    try:
        claim_level = g.jwt_claims["prj"][project_id]
    except KeyError:
        # The given project id is not included in the users claims
        abort(403, "You do not have access to the requested resource")
    # The given project id is included in the claims; verify that the access
    # level is sufficient
    if ACCESS_LEVELS[claim_level] < ACCESS_LEVELS[required_level]:
        abort(
            403,
            f"This operation requires access level '{required_level}', your "
            f"access level is '{claim_level}'",
        )
| 31.566929 | 80 | 0.629833 |
505d044daf02ad603fa15d47f1c9d802217306c6 | 1,960 | py | Python | gym/envs/debugging/two_round_nondeterministic_reward.py | zdx3578/gym-zdx | b72e638095e23b256fe72fc38ef45d2ca1652b6c | [
"MIT"
] | null | null | null | gym/envs/debugging/two_round_nondeterministic_reward.py | zdx3578/gym-zdx | b72e638095e23b256fe72fc38ef45d2ca1652b6c | [
"MIT"
] | null | null | null | gym/envs/debugging/two_round_nondeterministic_reward.py | zdx3578/gym-zdx | b72e638095e23b256fe72fc38ef45d2ca1652b6c | [
"MIT"
] | null | null | null | """
Simple environment with known optimal policy and value function.
Action 0 then 0 yields randomly -1 or 1 reward and terminates the session.
Action 0 then 1 yields randomly 0, 0, or 9 reward and terminates the session.
Action 1 then 0 yields randomly 0 or 2 reward and terminates the session.
Action 1 then 1 yields randomly 2 or 3 reward and terminates the session.
Optimal policy: action 0 then 1.
Optimal value function v(observation): (this is a fully observable MDP so observation==state)
v(0)= 3 (you get observation 0 after taking action 0)
v(1)= 2.5 (you get observation 1 after taking action 1)
v(2)= 3 (you get observation 2 in the starting state)
"""
import gym
import random
from gym import spaces
from gym.utils import seeding
class TwoRoundNondeterministicRewardEnv(gym.Env):
    """Two-step debugging MDP with stochastic terminal rewards.

    The agent takes two actions; the second ends the episode with a reward
    drawn uniformly from a list determined by the (first, second) action
    pair. The optimal policy is action 0 then 1 (expected reward 3).
    """

    def __init__(self):
        self.action_space = spaces.Discrete(2)
        self.observation_space = spaces.Discrete(3)
        self._reset()

    def _step(self, action):
        # rewards[first_action][second_action] -> equally likely outcomes.
        rewards = [
            [
                [-1, 1],   # expected value 0
                [0, 0, 9]  # expected value 3. This is the best path.
            ],
            [
                [0, 2],    # expected value 1
                [2, 3]     # expected value 2.5
            ]
        ]
        assert self.action_space.contains(action)

        if self.firstAction is None:
            # First round: remember the action; no reward, episode continues.
            self.firstAction = action
            reward = 0
            done = False
        else:
            reward = random.choice(rewards[self.firstAction][action])
            done = True

        return self._get_obs(), reward, done, {}

    def _get_obs(self):
        # Observation 2 = starting state; otherwise the first action taken.
        if self.firstAction is None:
            return 2
        else:
            return self.firstAction

    def _reset(self):
        self.firstAction = None
        return self._get_obs()

    def _seed(self, seed=None):
        # Fix: only auto-generate when no seed was given. The original used
        # a truthiness test (`seed if seed else ...`), which silently
        # replaced a legitimate seed of 0 with a hashed value.
        if seed is None:
            seed = seeding.hash_seed(None) % 2**32
        random.seed(seed)
        return [seed]
| 29.253731 | 93 | 0.615306 |
6593b32c3daf5dce72ab9a22eb051607e488851d | 613 | py | Python | Unsupervised ML/K-Means Clustering/Test_K.py | marcelkotze007/mk007---ML-Python-library | 307e51762fc821588206440daa2c18a6128f4aec | [
"MIT"
] | null | null | null | Unsupervised ML/K-Means Clustering/Test_K.py | marcelkotze007/mk007---ML-Python-library | 307e51762fc821588206440daa2c18a6128f4aec | [
"MIT"
] | null | null | null | Unsupervised ML/K-Means Clustering/Test_K.py | marcelkotze007/mk007---ML-Python-library | 307e51762fc821588206440daa2c18a6128f4aec | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from K_Means import K_means
def create_data(D = 2, S = 4, N = 900):
    """Generate N points from three unit-variance Gaussian clusters.

    Clusters are centred at (0, 0), (S, S) and (0, S). The points are
    scatter-plotted (blocking) before being returned as an (N, D) array.

    Fix: the original hard-coded slice bounds 300/600, so any N != 900
    raised a broadcasting error even though N is a parameter; the bounds
    are now derived from N (behavior for the default N=900 is unchanged).
    NOTE(review): the cluster means are 2-D vectors, so D must remain 2
    for the broadcasting to work — confirm before varying D.
    """
    mu1 = np.array([0, 0])
    mu2 = np.array([S, S])
    mu3 = np.array([0, S])

    third = N // 3
    X = np.zeros((N, D))
    X[:third, :] = np.random.randn(third, D) + mu1
    X[third:2 * third, :] = np.random.randn(third, D) + mu2
    X[2 * third:, :] = np.random.randn(N - 2 * third, D) + mu3

    plt.scatter(X[:, 0], X[:, 1])
    plt.show()

    return X
if __name__ == "__main__":
    # Demo: cluster the synthetic three-blob data with K=5 centroids and
    # validate with "DBI" (presumably the Davies-Bouldin index — confirm
    # against K_means.fit).
    X = create_data()
    model = K_means()
    model.fit(X, K=5, max_iter=15,beta=0.3,show_cost=True, show_grid=True, validate="DBI", Y=None)
| 22.703704 | 98 | 0.5677 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.