""" Hook specifications for tox.
"""
from pluggy import HookimplMarker
from pluggy import HookspecMarker
hookspec = HookspecMarker("tox")
hookimpl = HookimplMarker("tox")
@hookspec
def tox_addoption(parser):
""" add command line options to the argparse-style parser object."""
@hookspec
def tox_configure(config):
""" called after command line options have been parsed and the ini-file has
been read. Please be aware that the config object layout may change as its
API was not designed yet wrt to providing stability (it was an internal
thing purely before tox-2.0). """
@hookspec(firstresult=True)
def tox_get_python_executable(envconfig):
""" return a python executable for the given python base name.
The first plugin/hook which returns an executable path will determine it.
``envconfig`` is the testenv configuration which contains
per-testenv configuration, notably the ``.envname`` and ``.basepython``
setting.
"""
@hookspec(firstresult=True)
def tox_testenv_create(venv, action):
""" [experimental] perform creation action for this venv.
Some example usage:
- To *add* behavior but still use tox's implementation to set up a
virtualenv, implement this hook but do not return a value (or explicitly
return ``None``).
- To *override* tox's virtualenv creation, implement this hook and return
a non-``None`` value.
    .. note:: This API is experimental due to the unstable API of
              :class:`tox.venv.VirtualEnv`.
.. note:: This hook uses ``firstresult=True`` (see pluggy_) -- hooks
implementing this will be run until one returns non-``None``.
.. _pluggy: http://pluggy.readthedocs.io/en/latest/#first-result-only
"""
@hookspec(firstresult=True)
def tox_testenv_install_deps(venv, action):
""" [experimental] perform install dependencies action for this venv.
Some example usage:
- To *add* behavior but still use tox's implementation to install
dependencies, implement this hook but do not return a value (or
explicitly return ``None``). One use-case may be to install (or ensure)
non-python dependencies such as debian packages.
- To *override* tox's installation of dependencies, implement this hook
and return a non-``None`` value. One use-case may be to install via
a different installation tool such as `pip-accel`_ or `pip-faster`_.
    .. note:: This API is experimental due to the unstable API of
              :class:`tox.venv.VirtualEnv`.
.. note:: This hook uses ``firstresult=True`` (see pluggy_) -- hooks
implementing this will be run until one returns non-``None``.
.. _pip-accel: https://github.com/paylogic/pip-accel
.. _pip-faster: https://github.com/Yelp/venv-update
.. _pluggy: http://pluggy.readthedocs.io/en/latest/#first-result-only
"""
@hookspec
def tox_runtest_pre(venv):
""" [experimental] perform arbitrary action before running tests for this venv.
This could be used to indicate that tests for a given venv have started, for instance.
"""
@hookspec(firstresult=True)
def tox_runtest(venv, redirect):
""" [experimental] run the tests for this venv.
.. note:: This hook uses ``firstresult=True`` (see pluggy_) -- hooks
implementing this will be run until one returns non-``None``.
"""
@hookspec
def tox_runtest_post(venv):
""" [experimental] perform arbitrary action after running tests for this venv.
This could be used to have per-venv test reporting of pass/fail status.
"""
|
{
"content_hash": "7010d4acf85b8c1b7bcd2a695b82197c",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 90,
"avg_line_length": 33.904761904761905,
"alnum_prop": 0.696067415730337,
"repo_name": "loechel/tox",
"id": "d139f4094844516f5c631ca5630035d50979d72d",
"size": "3560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tox/hookspecs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "296218"
}
],
"symlink_target": ""
}
|
__all__ = ['FFI', 'VerificationError', 'VerificationMissing', 'CDefError',
'FFIError']
from .api import FFI, CDefError, FFIError
from .ffiplatform import VerificationError, VerificationMissing
__version__ = "1.4.1"
__version_info__ = (1, 4, 1)
# The verifier module file names are based on the CRC32 of a string that
# contains the following version number. It may be older than __version__
# if nothing is clearly incompatible.
__version_verifier_modules__ = "0.8.6"
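
# --- Example (editor's sketch, not part of the original file) ---------------
# Minimal use of the exported FFI class (declaring and calling a standard
# libm function; the library name "m" is platform-dependent):
#
#     from cffi import FFI
#     ffi = FFI()
#     ffi.cdef("double sqrt(double x);")
#     libm = ffi.dlopen("m")
#     assert libm.sqrt(4.0) == 2.0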
|
{
"content_hash": "68677e1f1a5a78f9b15d5f2a714f4dd8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 74,
"avg_line_length": 37.15384615384615,
"alnum_prop": 0.7060041407867494,
"repo_name": "aliyun/oss-ftp",
"id": "4de0c66430cd4adc694fa5e59c5b34d922344957",
"size": "483",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python27/win32/Lib/site-packages/cffi/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "247"
},
{
"name": "C",
"bytes": "439021"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "26115"
},
{
"name": "CSS",
"bytes": "84389"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "HTML",
"bytes": "140138"
},
{
"name": "JavaScript",
"bytes": "5048"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "24513573"
},
{
"name": "Roff",
"bytes": "21"
},
{
"name": "Shell",
"bytes": "7275"
},
{
"name": "Tcl",
"bytes": "2150885"
},
{
"name": "Visual Basic",
"bytes": "529"
}
],
"symlink_target": ""
}
|
"""The `IncrementLogProb` class."""
# Dependency imports
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import identity as identity_bijector
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import log_prob_ratio
from tensorflow_probability.python.internal import callable_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
__all__ = [
'IncrementLogProb',
]
class IncrementLogProb(distribution.AutoCompositeTensorDistribution):
"""A distribution representing an unnormalized measure on a singleton set.
`IncrementLogProb` represents a "factor", which can also be thought of as a
measure of the given size on a sample space consisting of a single element.
Its raison d'être is to provide a computed offset to the log probability of a
`JointDistribution`. A `JointDistribution` containing an `IncrementLogProb`
still represents a measure, but that measure is no longer in general a
probability measure (i.e., the probability may no longer integrate to 1).
Even though sampling from any measure represented by
`IncrementLogProb` is information-free, `IncrementLogProb` retains a
`sample` method for API compatibility with other Distributions.
This `sample` method returns a (batch) shape-[0] Tensor with the
same `dtype` as the `log_prob_increment` argument provided
originally.
"""
def __init__(
self,
log_prob_increment,
validate_args=False,
allow_nan_stats=False, # pylint: disable=unused-argument
log_prob_ratio_fn=None,
name='IncrementLogProb',
log_prob_increment_kwargs=None):
"""Construct a `IncrementLogProb` distribution-like object.
Args:
log_prob_increment: Float Tensor or callable returning a float Tensor. Log
probability/density to increment by.
validate_args: This argument is ignored, but is present because it is used
in certain situations where `Distribution`s are expected.
allow_nan_stats: This argument is ignored, but is present because it is
used in certain situations where `Distribution`s are expected.
log_prob_ratio_fn: Optional callable with signature `(p_kwargs, q_kwargs)
-> log_prob_ratio`, used to implement a custom `p_log_prob_increment -
q_log_prob_increment` computation.
name: Python `str` name prefixed to Ops created by this class.
log_prob_increment_kwargs: Passed to `log_prob_increment` if it is
callable.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
if log_prob_increment_kwargs is None:
log_prob_increment_kwargs = {}
if callable(log_prob_increment):
log_prob_increment_fn = lambda: tensor_util.convert_nonref_to_tensor( # pylint: disable=g-long-lambda
log_prob_increment(**log_prob_increment_kwargs))
spec = callable_util.get_output_spec(log_prob_increment_fn)
else:
if log_prob_increment_kwargs:
raise ValueError('`log_prob_increment_kwargs` is only valid when '
'`log_prob_increment` is callable.')
log_prob_increment = tensor_util.convert_nonref_to_tensor(
log_prob_increment)
log_prob_increment_fn = lambda: log_prob_increment
spec = log_prob_increment
self._log_prob_increment_fn = log_prob_increment_fn
self._log_prob_increment = log_prob_increment
self._dtype = spec.dtype
self._static_batch_shape = spec.shape
self._name = name
self._validate_args = validate_args
self._log_prob_ratio_fn = log_prob_ratio_fn
self._log_prob_increment_kwargs = log_prob_increment_kwargs
super().__init__(
dtype=spec.dtype,
reparameterization_type=reparameterization.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
# It's not obvious how to implement _parameter_properties for this
# distribution, as we cannot determine the batch_ndims or event_ndims for
# values in log_prob_increment_kwargs.
raise NotImplementedError()
@property
def _composite_tensor_nonshape_params(self):
params = ['log_prob_increment_kwargs']
if not callable(self.log_prob_increment):
params.append('log_prob_increment')
return tuple(params)
@property
def _composite_tensor_shape_params(self):
return ()
@property
def log_prob_increment(self):
return self._log_prob_increment
@property
def log_prob_increment_kwargs(self):
return self._log_prob_increment_kwargs
def _log_prob(self, x):
"""Log probability mass function."""
# TODO(axch): This method should do some shape checking on its argument to
# be really consistent with the Distribution contract. If the input Tensor's
# shape is not compatible, we should raise an error, as we do with other
# Distributions when they are queried at a point of inconsistent shape.
log_prob_increment = self._log_prob_increment_fn()
return tf.broadcast_to(
log_prob_increment,
ps.broadcast_shape(ps.shape(log_prob_increment),
ps.shape(x)[:-1]))
def _unnormalized_log_prob(self, x):
return self._log_prob(x)
def _batch_shape(self):
return self._static_batch_shape
def _batch_shape_tensor(self):
if tensorshape_util.is_fully_defined(self._static_batch_shape):
batch_shape = self._static_batch_shape
else:
batch_shape = ps.shape(self._log_prob_increment_fn())
return batch_shape
def _event_shape(self):
return tf.TensorShape([0])
def _event_shape_tensor(self):
return [0]
def _sample_n(self, n, seed=None):
del seed
return tf.zeros(
ps.concat(
[[n], self.batch_shape_tensor(),
self.event_shape_tensor()], axis=0))
def _default_event_space_bijector(self):
return identity_bijector.Identity(validate_args=self.validate_args)
@log_prob_ratio.RegisterLogProbRatio(IncrementLogProb)
def _log_prob_increment_log_prob_ratio(p, x, q, y, name=None):
"""Computes the log-prob ratio."""
del x, y
# pylint: disable=protected-access
with tf.name_scope(name or 'log_prob_increment_log_prob_ratio'):
if (p._log_prob_ratio_fn is not None and
p._log_prob_ratio_fn is q._log_prob_ratio_fn):
return p._log_prob_ratio_fn(p._log_prob_increment_kwargs,
q._log_prob_increment_kwargs)
else:
return p.unnormalized_log_prob(()) - q.unnormalized_log_prob(())
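
# --- Example (editor's sketch, not part of the original file) ---------------
# Direct use of IncrementLogProb: sampling is information-free (a shape-[0]
# event), while log_prob returns the configured increment:
#
#     d = IncrementLogProb(2.0)
#     s = d.sample()                # Tensor of shape [0]
#     d.log_prob(s)                 # == 2.0
#     d.unnormalized_log_prob(())   # also == 2.0, as used in the ratio above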
|
{
"content_hash": "d5c80f391f6bb39f384e5b2845bb76cf",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 110,
"avg_line_length": 38.49444444444445,
"alnum_prop": 0.6951941117044307,
"repo_name": "tensorflow/probability",
"id": "28c675d2b336668b504420408cc0df96712463d4",
"size": "7608",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_probability/python/experimental/distributions/increment_log_prob.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "55552121"
},
{
"name": "Python",
"bytes": "17339674"
},
{
"name": "Shell",
"bytes": "24852"
},
{
"name": "Starlark",
"bytes": "663851"
}
],
"symlink_target": ""
}
|
"""
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Rack shape table.
"""
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import UniqueConstraint
__docformat__ = 'reStructuredText en'
__all__ = ['create_table']
def create_table(metadata):
"Table factory."
tbl = Table('rack_shape', metadata,
Column('rack_shape_name', String, primary_key=True),
Column('number_rows', Integer, CheckConstraint('number_rows>0'),
nullable=False),
Column('number_columns', Integer,
CheckConstraint('number_columns>0'), nullable=False),
Column('label', String, nullable=False, unique=True),
UniqueConstraint('number_rows', 'number_columns'),
)
return tbl
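
# --- Example (editor's sketch, not part of the original file) ---------------
# Typical use of the factory: bind the table to a MetaData and emit the DDL.
# The in-memory SQLite engine is a hypothetical stand-in:
#
#     from sqlalchemy import MetaData, create_engine
#     metadata = MetaData()
#     tbl = create_table(metadata)
#     engine = create_engine('sqlite://')
#     metadata.create_all(engine)   # issues CREATE TABLE rack_shape ...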
|
{
"content_hash": "6c2bfe96bab9558419bae3f41201016f",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 32.166666666666664,
"alnum_prop": 0.7015544041450777,
"repo_name": "helixyte/TheLMA",
"id": "0a6600d749620441a72e6dd3afd4b29531de0e03",
"size": "965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thelma/repositories/rdb/schema/tables/rackshape.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3126"
},
{
"name": "Python",
"bytes": "3329729"
},
{
"name": "Shell",
"bytes": "3071"
}
],
"symlink_target": ""
}
|
import unittest
import pytest
import mock
import six
import docker
import os
import json
import logging
import sys
import tarfile
import io
from io import BytesIO
import uuid
from docker_scripts.squash import Squash
from docker_scripts.errors import SquashError
if not six.PY3:
import docker_scripts.lib.xtarfile
class TestIntegSquash(unittest.TestCase):
docker = docker.Client(version='1.16')
log = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
log.setLevel(logging.DEBUG)
class Image(object):
def __init__(self, dockerfile):
self.dockerfile = dockerfile
self.docker = TestIntegSquash.docker
self.name = "integ-%s" % uuid.uuid1()
self.tag = "%s:latest" % self.name
def __enter__(self):
f = BytesIO(self.dockerfile.encode('utf-8'))
for line in self.docker.build(fileobj=f, tag=self.tag, rm=True):
                try:
                    print(json.loads(line)["stream"].strip())
                except (ValueError, KeyError):
                    # Not a JSON build-stream entry; print the raw line.
                    print(line)
self.history = self.docker.history(self.tag)
self.layers = [o['Id'] for o in self.history]
self.metadata = self.docker.inspect_image(self.tag)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not os.getenv('CI'):
self.docker.remove_image(image=self.tag, force=True)
class SquashedImage(object):
def __init__(self, image, number_of_layers, output_path=None):
self.image = image
self.number_of_layers = number_of_layers
self.docker = TestIntegSquash.docker
self.log = TestIntegSquash.log
self.tag = "%s:squashed" % self.image.name
self.output_path = output_path
def __enter__(self):
from_layer = self.docker.history(
self.image.tag)[self.number_of_layers]['Id']
squash = Squash(
self.log, self.image.tag, self.docker, tag=self.tag, from_layer=from_layer, output_path=self.output_path)
self.image_id = squash.run()
if not self.output_path:
self.squashed_layer = self._squashed_layer()
self.layers = [o['Id'] for o in self.docker.history(self.tag)]
self.metadata = self.docker.inspect_image(self.tag)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not (os.getenv('CI') or self.output_path):
self.docker.remove_image(image=self.tag, force=True)
def _save_image(self):
image = self.docker.get_image(self.tag)
buf = io.BytesIO()
buf.write(image.data)
buf.seek(0) # Rewind
return buf
def _extract_file(self, name, tar_object):
with tarfile.open(fileobj=tar_object, mode='r') as tar:
member = tar.getmember(name)
return tar.extractfile(member)
def _squashed_layer(self):
image_id = self.docker.inspect_image(self.tag)['Id']
image = self._save_image()
return self._extract_file(image_id + '/layer.tar', image)
def assertFileExists(self, name):
self.squashed_layer.seek(0) # Rewind
with tarfile.open(fileobj=self.squashed_layer, mode='r') as tar:
assert name in tar.getnames(
), "File '%s' was not found in the squashed files: %s" % (name, tar.getnames())
def assertFileDoesNotExist(self, name):
self.squashed_layer.seek(0) # Rewind
with tarfile.open(fileobj=self.squashed_layer, mode='r') as tar:
assert name not in tar.getnames(
), "File '%s' was found in the squashed layer files: %s" % (name, tar.getnames())
def assertFileIsNotHardLink(self, name):
self.squashed_layer.seek(0) # Rewind
with tarfile.open(fileobj=self.squashed_layer, mode='r') as tar:
member = tar.getmember(name)
                assert not member.islnk(), \
                    "File '%s' should not be a hard link, but it is" % name
class Container(object):
def __init__(self, image):
self.image = image
self.docker = TestIntegSquash.docker
self.log = TestIntegSquash.log
def __enter__(self):
self.container = self.docker.create_container(image=self.image.tag)
data = self.docker.export(self.container)
self.content = six.BytesIO(data.read())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if not os.getenv('CI'):
self.docker.remove_container(self.container, force=True)
def assertFileExists(self, name):
self.content.seek(0) # Rewind
with tarfile.open(fileobj=self.content, mode='r') as tar:
assert name in tar.getnames(
), "File %s was not found in the container files: %s" % (name, tar.getnames())
def assertFileDoesNotExist(self, name):
self.content.seek(0) # Rewind
with tarfile.open(fileobj=self.content, mode='r') as tar:
assert name not in tar.getnames(
), "File %s was found in the container files: %s" % (name, tar.getnames())
def test_all_files_should_be_in_squashed_layer(self):
"""
We squash all layers in RUN, all files should be in the resulting squashed layer.
"""
dockerfile = '''
FROM busybox
RUN touch /somefile_layer1
RUN touch /somefile_layer2
RUN touch /somefile_layer3
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 3) as squashed_image:
squashed_image.assertFileDoesNotExist('.wh.somefile_layer1')
squashed_image.assertFileDoesNotExist('.wh.somefile_layer2')
squashed_image.assertFileDoesNotExist('.wh.somefile_layer3')
squashed_image.assertFileExists('somefile_layer1')
squashed_image.assertFileExists('somefile_layer2')
squashed_image.assertFileExists('somefile_layer3')
with self.Container(squashed_image) as container:
container.assertFileExists('somefile_layer1')
container.assertFileExists('somefile_layer2')
container.assertFileExists('somefile_layer3')
# We should have two layers less in the image
                self.assertEqual(
                    len(squashed_image.layers), len(image.layers) - 2)
def test_only_files_from_squashed_image_should_be_in_squashed_layer(self):
"""
We squash all layers in RUN, all files should be in the resulting squashed layer.
"""
dockerfile = '''
FROM busybox
RUN touch /somefile_layer1
RUN touch /somefile_layer2
RUN touch /somefile_layer3
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 2) as squashed_image:
squashed_image.assertFileDoesNotExist('.wh.somefile_layer2')
squashed_image.assertFileDoesNotExist('.wh.somefile_layer3')
# This file should not be in the squashed layer
squashed_image.assertFileDoesNotExist('somefile_layer1')
# Nor a marker files for it
squashed_image.assertFileDoesNotExist('.wh.somefile_layer1')
squashed_image.assertFileExists('somefile_layer2')
squashed_image.assertFileExists('somefile_layer3')
with self.Container(squashed_image) as container:
# This file should be in the container
container.assertFileExists('somefile_layer1')
container.assertFileExists('somefile_layer2')
container.assertFileExists('somefile_layer3')
# We should have two layers less in the image
self.assertEqual(
len(squashed_image.layers), len(image.layers) - 1)
def test_there_should_be_a_marker_file_in_the_squashed_layer(self):
"""
Here we're testing that the squashed layer should contain a '.wh.somefile_layer1'
file, because the file was not found in the squashed tar and it is present in
the layers we do not squash.
"""
dockerfile = '''
FROM busybox
RUN touch /somefile_layer1
RUN rm /somefile_layer1
RUN touch /somefile_layer3
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 2) as squashed_image:
squashed_image.assertFileDoesNotExist('somefile_layer1')
squashed_image.assertFileExists('somefile_layer3')
squashed_image.assertFileExists('.wh.somefile_layer1')
squashed_image.assertFileIsNotHardLink('.wh.somefile_layer1')
with self.Container(squashed_image) as container:
container.assertFileExists('somefile_layer3')
container.assertFileDoesNotExist('somefile_layer1')
# We should have one layer less in the image
self.assertEqual(
len(squashed_image.layers), len(image.layers) - 1)
def test_there_should_be_a_marker_file_in_the_squashed_layer_even_more_complex(self):
dockerfile = '''
FROM busybox
RUN touch /somefile_layer1
RUN rm /somefile_layer1
RUN touch /somefile_layer2
RUN touch /somefile_layer3
RUN rm /somefile_layer2
RUN touch /somefile_layer4
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 2) as squashed_image:
squashed_image.assertFileDoesNotExist('somefile_layer1')
squashed_image.assertFileDoesNotExist('somefile_layer2')
squashed_image.assertFileDoesNotExist('somefile_layer3')
squashed_image.assertFileExists('somefile_layer4')
squashed_image.assertFileDoesNotExist('.wh.somefile_layer1')
squashed_image.assertFileExists('.wh.somefile_layer2')
squashed_image.assertFileIsNotHardLink('.wh.somefile_layer2')
squashed_image.assertFileDoesNotExist('.wh.somefile_layer3')
squashed_image.assertFileDoesNotExist('.wh.somefile_layer4')
with self.Container(squashed_image) as container:
container.assertFileExists('somefile_layer3')
container.assertFileExists('somefile_layer4')
container.assertFileDoesNotExist('somefile_layer1')
container.assertFileDoesNotExist('somefile_layer2')
# We should have one layer less in the image
self.assertEqual(
len(squashed_image.layers), len(image.layers) - 1)
def test_should_handle_removal_of_directories(self):
dockerfile = '''
FROM busybox
RUN mkdir -p /some/dir/tree
RUN touch /some/dir/tree/file1
RUN touch /some/dir/tree/file2
RUN touch /some/dir/file1
RUN touch /some/dir/file2
RUN rm -rf /some/dir/tree
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 2) as squashed_image:
squashed_image.assertFileDoesNotExist('some/dir/tree/file1')
squashed_image.assertFileDoesNotExist('some/dir/tree/file2')
squashed_image.assertFileDoesNotExist('some/dir/file1')
squashed_image.assertFileExists('some/dir/file2')
squashed_image.assertFileExists('some/dir/.wh.tree')
squashed_image.assertFileIsNotHardLink('some/dir/.wh.tree')
with self.Container(squashed_image) as container:
container.assertFileExists('some/dir/file1')
container.assertFileExists('some/dir/file2')
container.assertFileDoesNotExist('some/dir/tree')
container.assertFileDoesNotExist('some/dir/tree/file1')
container.assertFileDoesNotExist('some/dir/tree/file2')
# We should have one layer less in the image
self.assertEqual(
len(squashed_image.layers), len(image.layers) - 1)
def test_should_skip_files_when_these_are_modified_and_removed_in_squashed_layer(self):
dockerfile = '''
FROM busybox
RUN touch /file
RUN chmod -R 777 /file
RUN rm -rf /file
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 2) as squashed_image:
squashed_image.assertFileDoesNotExist('file')
squashed_image.assertFileExists('.wh.file')
squashed_image.assertFileIsNotHardLink('.wh.file')
with self.Container(squashed_image) as container:
container.assertFileDoesNotExist('file')
# We should have one layer less in the image
self.assertEqual(
len(squashed_image.layers), len(image.layers) - 1)
def test_should_skip_files_when_these_are_removed_and_modified_in_squashed_layer(self):
dockerfile = '''
FROM busybox
RUN touch /file
RUN chmod -R 777 /file
RUN rm -rf /file
RUN touch /file
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 3) as squashed_image:
squashed_image.assertFileExists('file')
squashed_image.assertFileDoesNotExist('.wh.file')
with self.Container(squashed_image) as container:
container.assertFileExists('file')
# We should have two layers less in the image
self.assertEqual(
len(squashed_image.layers), len(image.layers) - 2)
def test_should_handle_multiple_changes_to_files_in_squashed_layers(self):
dockerfile = '''
FROM busybox
RUN mkdir -p /some/dir/tree
RUN touch /some/dir/tree/file1
RUN touch /some/dir/tree/file2
RUN touch /some/dir/file1
RUN touch /some/dir/file2
RUN chmod -R 777 /some
RUN rm -rf /some/dir/tree
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 2) as squashed_image:
squashed_image.assertFileDoesNotExist('some/dir/tree/file1')
squashed_image.assertFileDoesNotExist('some/dir/tree/file2')
squashed_image.assertFileExists('some/dir/file1')
squashed_image.assertFileExists('some/dir/file2')
squashed_image.assertFileExists('some/dir/.wh.tree')
squashed_image.assertFileIsNotHardLink('some/dir/.wh.tree')
with self.Container(squashed_image) as container:
container.assertFileExists('some/dir/file1')
container.assertFileExists('some/dir/file2')
container.assertFileDoesNotExist('some/dir/tree')
container.assertFileDoesNotExist('some/dir/tree/file1')
container.assertFileDoesNotExist('some/dir/tree/file2')
# We should have one layer less in the image
self.assertEqual(
len(squashed_image.layers), len(image.layers) - 1)
# https://github.com/goldmann/docker-scripts/issues/28
def test_docker_version_in_metadata_should_be_set_after_squashing(self):
dockerfile = '''
FROM busybox
RUN touch file
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 2) as squashed_image:
self.assertEqual(
image.metadata['DockerVersion'], squashed_image.metadata['DockerVersion'])
# https://github.com/goldmann/docker-scripts/issues/30
# https://github.com/goldmann/docker-scripts/pull/31
def test_files_in_squashed_tar_not_prefixed_wth_dot(self):
dockerfile = '''
FROM busybox
RUN touch file
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 2, output_path="image.tar") as squashed_image:
with tarfile.open("image.tar", mode='r') as tar:
all_files = tar.getnames()
for name in all_files:
self.assertFalse(name.startswith('.'))
# https://github.com/goldmann/docker-scripts/issues/32
def test_version_file_exists_in_squashed_layer(self):
dockerfile = '''
FROM busybox
RUN touch file
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 2, output_path="image.tar") as squashed_image:
with tarfile.open("image.tar", mode='r') as tar:
all_files = tar.getnames()
self.assertIn(squashed_image.image_id + "/json", all_files)
self.assertIn(
squashed_image.image_id + "/layer.tar", all_files)
self.assertIn(
squashed_image.image_id + "/VERSION", all_files)
# https://github.com/goldmann/docker-scripts/issues/33
def test_docker_size_in_metadata_should_be_upper_case(self):
dockerfile = '''
FROM busybox
RUN touch file
'''
with self.Image(dockerfile) as image:
with self.SquashedImage(image, 2) as squashed_image:
self.assertIsInstance(image.metadata['Size'], int)
with self.assertRaisesRegexp(KeyError, "'size'"):
self.assertEqual(image.metadata['size'], None)
if __name__ == '__main__':
unittest.main()
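
# --- Example (editor's sketch, not part of the original file) ---------------
# The Squash API exercised by these tests, outside of unittest (the image tag
# and layer ID are hypothetical):
#
#     squash = Squash(log, 'myimage:latest', docker_client,
#                     tag='myimage:squashed', from_layer=layer_id)
#     new_image_id = squash.run()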
|
{
"content_hash": "0705256f5fc5bc469561f478248ff71e",
"timestamp": "",
"source": "github",
"line_count": 446,
"max_line_length": 121,
"avg_line_length": 41.11210762331839,
"alnum_prop": 0.5866055846422339,
"repo_name": "jpopelka/docker-scripts",
"id": "8776b6f1893d03bdc6b3ed0d42666dabb7ddedcc",
"size": "18336",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_integ_squash.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "747"
},
{
"name": "Python",
"bytes": "62062"
}
],
"symlink_target": ""
}
|
from decimal import Decimal
from trytond.pool import Pool, PoolMeta
from jinja2.filters import do_striptags
from werkzeug.exceptions import NotFound
from nereid import jsonify, flash, request, url_for, route, redirect, \
render_template, abort, current_locale
from nereid.contrib.locale import make_lazy_gettext
from forms import GiftCardForm
_ = make_lazy_gettext('gift_card')
__all__ = ['Product']
__metaclass__ = PoolMeta
class Product:
"Product extension for Nereid"
__name__ = "product.product"
def serialize(self, purpose=None):
"""
Downstream implementation which adds a key called inventory_status
to the dictionary if purpose is 'variant_selection'.
:param purpose: String which decides structure of dictionary
"""
res = super(Product, self).serialize(purpose=purpose)
if purpose == 'variant_selection':
res.update({
'inventory_status': self.inventory_status(),
})
return res
def get_default_image(self, name):
"Returns default product image"
ModelData = Pool().get('ir.model.data')
# Fallback condition if there is no default_image_set defined
images = self.get_images()
if images:
return images[0].id
else:
return ModelData.get_id("nereid_webshop", "mystery_box")
def ga_product_data(self, **kwargs):
'''
Return a dictionary of the product information as expected by Google
Analytics
Other possible values for kwargs include
:param list: The name of the list in which this impression is to be
recorded
:param position: Integer position of the item on the view
'''
rv = {
'id': self.code or unicode(self.id),
'name': self.name,
'category': self.category and self.category.name or None,
}
rv.update(kwargs)
return rv
def json_ld(self, **kwargs):
'''
Returns a JSON serializable dictionary of the product with the Product
schema markup.
See: http://schema.org/Product
Any key value pairs passed to kwargs overwrites default information.
'''
sale_price = self.sale_price(1)
return {
"@context": "http://schema.org",
"@type": "Product",
"name": self.name,
"sku": self.code,
"description": do_striptags(self.description),
"offers": {
"@type": "Offer",
"availability": "http://schema.org/InStock",
"price": str(sale_price),
"priceCurrency": current_locale.currency.code,
},
"image": self.default_image.transform_command().thumbnail(
500, 500, 'a').url(_external=True),
"url": self.get_absolute_url(_external=True),
}
@classmethod
@route('/product/<uri>')
@route('/product/<path:path>/<uri>')
def render(cls, uri, path=None):
"""
Render gift card template if product is of type gift card
"""
render_obj = super(Product, cls).render(uri, path)
if not isinstance(render_obj, NotFound) \
and render_obj.context['product'].is_gift_card:
# Render gift card
return redirect(
url_for('product.product.render_gift_card', uri=uri)
)
return render_obj
@classmethod
@route('/gift-card/<uri>', methods=['GET', 'POST'])
def render_gift_card(cls, uri):
"""
Add gift card as a new line in cart
Request:
'GET': Renders gift card page
'POST': Buy Gift Card
Response:
'OK' if X-HTTPRequest
Redirect to shopping cart if normal request
"""
SaleLine = Pool().get('sale.line')
Cart = Pool().get('nereid.cart')
try:
product, = cls.search([
('displayed_on_eshop', '=', True),
('uri', '=', uri),
('template.active', '=', True),
('is_gift_card', '=', True)
], limit=1)
except ValueError:
abort(404)
form = GiftCardForm(product)
if form.validate_on_submit():
cart = Cart.open_cart(create_order=True)
# Code to add gift card as a line to cart
values = {
'product': product.id,
'sale': cart.sale.id,
'type': 'line',
'sequence': 10,
'quantity': 1,
'unit': None,
'description': None,
'recipient_email': form.recipient_email.data,
'recipient_name': form.recipient_name.data,
'message': form.message.data,
}
order_line = SaleLine(**values)
order_line.on_change_product()
# Here 0 means the default option to enter open amount is
# selected
if form.selected_amount.data != 0:
order_line.gc_price = form.selected_amount.data
order_line.on_change_gc_price()
else:
order_line.unit_price = Decimal(form.open_amount.data)
order_line.save()
message = 'Gift Card has been added to your cart'
if request.is_xhr: # pragma: no cover
return jsonify(message=message)
flash(_(message), 'info')
return redirect(url_for('nereid.cart.view_cart'))
return render_template(
'catalog/gift-card.html', product=product, form=form
)
def get_absolute_url(self, **kwargs):
"""
Return gift card URL if product is a gift card
"""
if self.is_gift_card:
return url_for(
'product.product.render_gift_card', uri=self.uri, **kwargs
)
return super(Product, self).get_absolute_url(**kwargs)
def get_menu_item(self, max_depth):
"""
Return dictionary with serialized node for menu item
{
title: <display name>,
link: <url>,
record: <instance of record> # if type_ is record
}
"""
return {
'record': self,
'title': self.name,
'image': self.default_image,
'link': self.get_absolute_url(),
}
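
# --- Example (editor's sketch, not part of the original file) ---------------
# Illustrative shape of the dictionary returned by json_ld(); all values are
# hypothetical:
#
#     {
#         "@context": "http://schema.org",
#         "@type": "Product",
#         "name": "Gift Card",
#         "sku": "GC-100",
#         "description": "A prepaid gift card",
#         "offers": {
#             "@type": "Offer",
#             "availability": "http://schema.org/InStock",
#             "price": "100.00",
#             "priceCurrency": "USD",
#         },
#         "image": "https://example.com/gift-card-500x500.jpg",
#         "url": "https://example.com/gift-card/gc-100",
#     }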
|
{
"content_hash": "4b93a6d63341f85e1ae4bade64d9f66e",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 78,
"avg_line_length": 31.458937198067634,
"alnum_prop": 0.539004914004914,
"repo_name": "sharoonthomas/nereid-webshop",
"id": "d1ac04f3b5a1d9c0cb6d6dd7cf5ed50408611bfb",
"size": "6536",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "product.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "61685"
},
{
"name": "HTML",
"bytes": "221213"
},
{
"name": "JavaScript",
"bytes": "29359"
},
{
"name": "Python",
"bytes": "121308"
}
],
"symlink_target": ""
}
|
"""Demonstrates how to find and remove shared sets/shared set criterions.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
from googleads import adwords
CAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'
PAGE_SIZE = 500
def main(client, campaign_id):
# Initialize appropriate services.
shared_criterion_service = client.GetService('SharedCriterionService',
version='v201506')
campaign_shared_set_service = client.GetService('CampaignSharedSetService',
version='v201506')
shared_set_ids = []
criterion_ids = []
# First, retrieve all shared sets associated with the campaign.
# Create selector for shared sets to:
# - filter by campaign ID,
# - filter by shared set type.
selector = {
'fields': ['SharedSetId', 'CampaignId', 'SharedSetName', 'SharedSetType',
'Status'],
'predicates': [
{
'field': 'CampaignId',
'operator': 'EQUALS',
'values': [campaign_id]
},
{
'field': 'SharedSetType',
'operator': 'IN',
'values': ['NEGATIVE_KEYWORDS', 'NEGATIVE_PLACEMENTS']
}
],
'paging': {
'startIndex': 0,
'numberResults': PAGE_SIZE
}
}
# Set initial values:
offset = 0
page = {'totalNumEntries': 1}
while page['totalNumEntries'] > offset:
page = campaign_shared_set_service.get(selector)
if 'entries' in page:
for shared_set in page['entries']:
print 'Campaign shared set ID %d and name "%s"' % (
shared_set['sharedSetId'], shared_set['sharedSetName']
)
shared_set_ids.append(shared_set['sharedSetId'])
# Increment values to request the next page.
offset += PAGE_SIZE
selector['paging']['startIndex'] = offset
# Next, retrieve criterion IDs for all found shared sets.
selector = {
'fields': ['SharedSetId', 'Id', 'KeywordText', 'KeywordMatchType',
'PlacementUrl'],
'predicates': [
{
'field': 'SharedSetId',
'operator': 'IN',
'values': shared_set_ids
}
],
'paging': {
'startIndex': 0,
'numberResults': PAGE_SIZE
}
}
# Set initial values:
offset = 0
page = {'totalNumEntries': 1}
while page['totalNumEntries'] > offset:
page = shared_criterion_service.get(selector)
    if 'entries' in page:
for shared_criterion in page['entries']:
if shared_criterion['criterion']['type'] == 'KEYWORD':
          print ('Shared negative keyword with ID %d and text "%s" was '
                 'found.' % (shared_criterion['criterion']['id'],
                             shared_criterion['criterion']['text']))
        elif shared_criterion['criterion']['type'] == 'PLACEMENT':
          print ('Shared negative placement with ID %d and url "%s" was '
                 'found.' % (shared_criterion['criterion']['id'],
                             shared_criterion['criterion']['url']))
else:
print 'Shared criterion with ID %d was found.' % (
shared_criterion['criterion']['id'],
)
criterion_ids.append({
'sharedSetId': shared_criterion['sharedSetId'],
'criterionId': shared_criterion['criterion']['id']
})
# Increment values to request the next page.
offset += PAGE_SIZE
selector['paging']['startIndex'] = offset
# Finally, remove the criteria.
operations = [
{
'operator': 'REMOVE',
'operand': {
'criterion': {'id': criterion['criterionId']},
'sharedSetId': criterion['sharedSetId']
}
} for criterion in criterion_ids
]
response = shared_criterion_service.mutate(operations)
if 'value' in response:
for criterion in response['value']:
      print ('Criterion ID %d was successfully removed from shared set ID '
             '%d.' % (criterion['criterion']['id'], criterion['sharedSetId']))
else:
print 'No shared criteria were removed.'
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client, CAMPAIGN_ID)
|
{
"content_hash": "43e76154b55e93b67d7e38fb6ba0c2c6",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 79,
"avg_line_length": 32.30935251798561,
"alnum_prop": 0.5771543086172345,
"repo_name": "wubr2000/googleads-python-lib",
"id": "56f39737329f3f6b5d29cecd0b4d1d1749da3701",
"size": "5109",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/adwords/v201506/advanced_operations/find_and_remove_criteria_from_shared_set.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
}
|
"""
This module defines commonly used code for the mamo package
"""
__author__ = "Iqbal Abdullah <iqbal@marimore.co.jp>"
__date__ = "$LastChangedDate$"
__version__ = "$LastChangedRevision$"
import re
class BaseClass(object):
"""
BaseClass contains very common functions
and implementations for mamo classes. All of mamo packages classes
uses BaseClass as their parent class
"""
def read_only_property(self):
        raise AttributeError("Read-only attribute")
def _prop_set_classname(self, value):
self.read_only_property()
def _prop_get_classname(self):
compiled_re = re.compile("'.*'")
clsname = compiled_re.search("%s" % (self.__class__)).group()
clsname = clsname.replace("'","")
clsname = clsname.replace("%s" % (self.__module__), "")
clsname = clsname.replace(".","")
return clsname
myclassname = property(_prop_get_classname, _prop_set_classname,
doc="Returns the name of the class")
|
{
"content_hash": "b03118f3a7d93efd6b152e18afbe9823",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 70,
"avg_line_length": 31.393939393939394,
"alnum_prop": 0.611969111969112,
"repo_name": "k4ml/Marimorepy",
"id": "3d3f724f0e446d56e8c2de15dc3426446ebb2348",
"size": "2741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mamopublic/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "3167"
},
{
"name": "Python",
"bytes": "60262"
}
],
"symlink_target": ""
}
|
import benchexec.result as result
from . import cbmc
class Tool(cbmc.Tool):
"""
Tool info for JBMC (http://www.cprover.org/cbmc/).
It always adds --xml-ui to the command-line arguments for easier parsing of
the output, unless a propertyfile is passed -- in which case running under
SV-COMP conditions is assumed.
"""
def executable(self, tool_locator):
return tool_locator.find_executable("jbmc")
def name(self):
return "JBMC"
def determine_result(self, run):
status = result.RESULT_ERROR
if run.exit_code.value in [0, 10]:
result_str = run.output[-1].strip()
if result_str == "TRUE":
status = result.RESULT_TRUE_PROP
elif result_str == "FALSE":
status = result.RESULT_FALSE_PROP
elif "UNKNOWN" in run.output:
status = result.RESULT_UNKNOWN
elif run.exit_code.value == 64 and "Usage error!" in run.output:
status = "INVALID ARGUMENTS"
elif run.exit_code.value == 6 and "Out of memory" in run.output:
status = "OUT OF MEMORY"
return status
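
# --- Editor's note (not part of the original file): the mapping implemented
# by determine_result above is
#
#     exit code 0/10, last output line "TRUE"    -> RESULT_TRUE_PROP
#     exit code 0/10, last output line "FALSE"   -> RESULT_FALSE_PROP
#     exit code 0/10, "UNKNOWN" in output        -> RESULT_UNKNOWN
#     exit code 64 and "Usage error!" in output  -> "INVALID ARGUMENTS"
#     exit code 6 and "Out of memory" in output  -> "OUT OF MEMORY"
#     anything else                              -> RESULT_ERROR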
|
{
"content_hash": "c16ca394fd82cecb868a5de263794935",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 79,
"avg_line_length": 30.55263157894737,
"alnum_prop": 0.5986218776916451,
"repo_name": "dbeyer/benchexec",
"id": "732537e81d7449b6f8db883a80044857959d8018",
"size": "1391",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "benchexec/tools/jbmc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gnuplot",
"bytes": "3902"
},
{
"name": "HTML",
"bytes": "60587"
},
{
"name": "PHP",
"bytes": "4380"
},
{
"name": "Python",
"bytes": "598496"
},
{
"name": "Shell",
"bytes": "4750"
}
],
"symlink_target": ""
}
|
"""Provides a menu hook for several other plugins.
See :doc:`/specs/office`.
"""
from lino.api import ad, _
class Plugin(ad.Plugin):
"See :doc:`/dev/plugins`."
verbose_name = _("Office")
|
{
"content_hash": "657d89e95928142ed295a3060d33d7aa",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 50,
"avg_line_length": 16.666666666666668,
"alnum_prop": 0.635,
"repo_name": "lino-framework/lino",
"id": "73ff7671b47ceb1a1c1582f67502b432345df387",
"size": "317",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lino/modlib/office/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "704"
},
{
"name": "CSS",
"bytes": "1281825"
},
{
"name": "Emacs Lisp",
"bytes": "277895"
},
{
"name": "HTML",
"bytes": "928037"
},
{
"name": "Hack",
"bytes": "3416"
},
{
"name": "JavaScript",
"bytes": "1128493"
},
{
"name": "PHP",
"bytes": "53997"
},
{
"name": "Python",
"bytes": "2601694"
},
{
"name": "Shell",
"bytes": "4469"
},
{
"name": "TSQL",
"bytes": "2427"
}
],
"symlink_target": ""
}
|
from . import Event
class Tick(Event):
"""
Every time the background app ticks, this event is dispatched.
data contains the time at which the event was dispatched
"""
pass
class MoveToForeground(Event):
pass
|
{
"content_hash": "ce3722fc515a19e1a8c3dc787de471b1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 66,
"avg_line_length": 19.5,
"alnum_prop": 0.688034188034188,
"repo_name": "Grisou13/mawie",
"id": "2a6364693153dc56d15937891dd656bd89462f0c",
"size": "234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mawie/events/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "386"
},
{
"name": "Python",
"bytes": "353663"
}
],
"symlink_target": ""
}
|
"""pyzombie HTTP RESTful server handler giving a web form to add an
executable."""
__author__ = ('Lance Finn Helsten',)
__version__ = '1.0.1'
__copyright__ = """Copyright 2009 Lance Finn Helsten (helsten@acm.org)"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "reStructuredText en"
__all__ = ['HandlerExecAdd']
import sys
import io
import logging
import mimetypes
import http.client
from .HandlerLeftovers import HandlerLeftovers
class HandlerExecAdd(HandlerLeftovers):
"""Handle the add executable resource."""
@classmethod
def dispatch(cls):
cls.initdispatch(r"""^/add$""",
"GET,POST,OPTIONS,TRACE",
"/help/RESTful")
return cls
def __init__(self, req, urlargs):
super().__init__(req, {"leftover":"ExecAdd.html"})
def post(self):
fs = self.multipart()
if fs:
ctype, enc = mimetypes.guess_type(fs['execfile'].filename)
self.initexecutable(mediatype=ctype)
datafp = fs['execfile'].file
self.executable.writeimage(datafp)
self.nocache = True
self.status = http.client.CREATED
self["Location"] = self.serverurl(self.executable.name)
self.flush()
else:
self.error(http.client.UNSUPPORTED_MEDIA_TYPE)
|
{
"content_hash": "2302a9d4618312f7b1218de9a9c80aab",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 73,
"avg_line_length": 31.016666666666666,
"alnum_prop": 0.6496507254164428,
"repo_name": "lanhel/pyzombie",
"id": "aeaf6f522277c07da51d62564ac3f7f877729af1",
"size": "1988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyzombie/handlers/HandlerExecAdd.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6328"
},
{
"name": "HTML",
"bytes": "2952"
},
{
"name": "Makefile",
"bytes": "4623"
},
{
"name": "Python",
"bytes": "166775"
}
],
"symlink_target": ""
}
|
import unittest
import random
from scapy.packet import Raw
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from framework import VppTestCase, VppTestRunner
from util import Host, ppp
class TestL2xc(VppTestCase):
""" L2XC Test Case """
@classmethod
def setUpClass(cls):
"""
Perform standard class setup (defined by class method setUpClass in
class VppTestCase) before running the test case, set test case related
variables and configure VPP.
:var int hosts_nr: Number of hosts to be created.
:var int dl_pkts_per_burst: Number of packets in burst for dual-loop
test.
:var int sl_pkts_per_burst: Number of packets in burst for single-loop
test.
"""
super(TestL2xc, cls).setUpClass()
# Test variables
cls.hosts_nr = 10
cls.dl_pkts_per_burst = 257
cls.sl_pkts_per_burst = 2
try:
# create 4 pg interfaces
cls.create_pg_interfaces(range(4))
# packet flows mapping pg0 -> pg1, pg2 -> pg3, etc.
cls.flows = dict()
cls.flows[cls.pg0] = [cls.pg1]
cls.flows[cls.pg1] = [cls.pg0]
cls.flows[cls.pg2] = [cls.pg3]
cls.flows[cls.pg3] = [cls.pg2]
# packet sizes
cls.pg_if_packet_sizes = [64, 512, 1518, 9018]
cls.interfaces = list(cls.pg_interfaces)
# Create bi-directional cross-connects between pg0 and pg1
cls.vapi.sw_interface_set_l2_xconnect(
cls.pg0.sw_if_index, cls.pg1.sw_if_index, enable=1)
cls.vapi.sw_interface_set_l2_xconnect(
cls.pg1.sw_if_index, cls.pg0.sw_if_index, enable=1)
# Create bi-directional cross-connects between pg2 and pg3
cls.vapi.sw_interface_set_l2_xconnect(
cls.pg2.sw_if_index, cls.pg3.sw_if_index, enable=1)
cls.vapi.sw_interface_set_l2_xconnect(
cls.pg3.sw_if_index, cls.pg2.sw_if_index, enable=1)
# mapping between packet-generator index and lists of test hosts
cls.hosts_by_pg_idx = dict()
# Create host MAC and IPv4 lists
cls.create_host_lists(cls.hosts_nr)
# setup all interfaces
for i in cls.interfaces:
i.admin_up()
except Exception:
super(TestL2xc, cls).tearDownClass()
raise
@classmethod
def tearDownClass(cls):
super(TestL2xc, cls).tearDownClass()
def setUp(self):
super(TestL2xc, self).setUp()
self.reset_packet_infos()
def tearDown(self):
"""
Show various debug prints after each test.
"""
super(TestL2xc, self).tearDown()
def show_commands_at_teardown(self):
self.logger.info(self.vapi.ppcli("show l2patch"))
@classmethod
def create_host_lists(cls, count):
"""
Method to create required number of MAC and IPv4 addresses.
Create required number of host MAC addresses and distribute them among
interfaces. Create host IPv4 address for every host MAC address too.
:param count: Number of hosts to create MAC and IPv4 addresses for.
"""
for pg_if in cls.pg_interfaces:
cls.hosts_by_pg_idx[pg_if.sw_if_index] = []
hosts = cls.hosts_by_pg_idx[pg_if.sw_if_index]
for j in range(0, count):
host = Host(
"00:00:00:ff:%02x:%02x" % (pg_if.sw_if_index, j),
"172.17.1%02x.%u" % (pg_if.sw_if_index, j))
hosts.append(host)
def create_stream(self, src_if, packet_sizes, packets_per_burst):
"""
Create input packet stream for defined interface.
:param object src_if: Interface to create packet stream for.
:param list packet_sizes: List of required packet sizes.
:param int packets_per_burst: Number of packets in burst.
:return: Stream of packets.
"""
pkts = []
for i in range(0, packets_per_burst):
dst_if = self.flows[src_if][0]
dst_host = random.choice(self.hosts_by_pg_idx[dst_if.sw_if_index])
src_host = random.choice(self.hosts_by_pg_idx[src_if.sw_if_index])
pkt_info = self.create_packet_info(src_if, dst_if)
payload = self.info_to_payload(pkt_info)
p = (Ether(dst=dst_host.mac, src=src_host.mac) /
IP(src=src_host.ip4, dst=dst_host.ip4) /
UDP(sport=1234, dport=1234) /
Raw(payload))
pkt_info.data = p.copy()
size = random.choice(packet_sizes)
self.extend_packet(p, size)
pkts.append(p)
return pkts
def verify_capture(self, pg_if, capture):
"""
Verify captured input packet stream for defined interface.
:param object pg_if: Interface to verify captured packet stream for.
:param list capture: Captured packet stream.
"""
last_info = dict()
for i in self.interfaces:
last_info[i.sw_if_index] = None
dst_sw_if_index = pg_if.sw_if_index
for packet in capture:
try:
ip = packet[IP]
udp = packet[UDP]
payload_info = self.payload_to_info(packet[Raw])
packet_index = payload_info.index
self.assertEqual(payload_info.dst, dst_sw_if_index)
self.logger.debug("Got packet on port %s: src=%u (id=%u)" %
(pg_if.name, payload_info.src, packet_index))
next_info = self.get_next_packet_info_for_interface2(
payload_info.src, dst_sw_if_index,
last_info[payload_info.src])
last_info[payload_info.src] = next_info
self.assertTrue(next_info is not None)
self.assertEqual(packet_index, next_info.index)
saved_packet = next_info.data
# Check standard fields
self.assertEqual(ip.src, saved_packet[IP].src)
self.assertEqual(ip.dst, saved_packet[IP].dst)
self.assertEqual(udp.sport, saved_packet[UDP].sport)
self.assertEqual(udp.dport, saved_packet[UDP].dport)
            except Exception:
self.logger.error(ppp("Unexpected or invalid packet:", packet))
raise
for i in self.interfaces:
remaining_packet = self.get_next_packet_info_for_interface2(
i, dst_sw_if_index, last_info[i.sw_if_index])
self.assertTrue(remaining_packet is None,
"Port %u: Packet expected from source %u didn't"
" arrive" % (dst_sw_if_index, i.sw_if_index))
def run_l2xc_test(self, pkts_per_burst):
""" L2XC test """
# Create incoming packet streams for packet-generator interfaces
for i in self.interfaces:
pkts = self.create_stream(i, self.pg_if_packet_sizes,
pkts_per_burst)
i.add_stream(pkts)
# Enable packet capturing and start packet sending
self.pg_enable_capture(self.pg_interfaces)
self.pg_start()
# Verify outgoing packet streams per packet-generator interface
for i in self.pg_interfaces:
capture = i.get_capture()
self.logger.info("Verifying capture on interface %s" % i.name)
self.verify_capture(i, capture)
def test_l2xc_sl(self):
""" L2XC single-loop test
Test scenario:
1. config
2 pairs of 2 interfaces, l2xconnected
2. sending l2 eth packets between 4 interfaces
64B, 512B, 1518B, 9018B (ether_size)
burst of 2 packets per interface
"""
self.run_l2xc_test(self.sl_pkts_per_burst)
def test_l2xc_dl(self):
""" L2XC dual-loop test
Test scenario:
1. config
2 pairs of 2 interfaces, l2xconnected
2. sending l2 eth packets between 4 interfaces
64B, 512B, 1518B, 9018B (ether_size)
burst of 257 packets per interface
"""
self.run_l2xc_test(self.dl_pkts_per_burst)
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
{
"content_hash": "0dc37b5c1e6ba81080be27e4adbd6699",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 79,
"avg_line_length": 36.96506550218341,
"alnum_prop": 0.5663319551092735,
"repo_name": "vpp-dev/vpp",
"id": "bc653f0b4fb833b57f74675f971a65ed6d5e4af0",
"size": "8489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_l2xc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "16871"
},
{
"name": "C",
"bytes": "21411560"
},
{
"name": "C++",
"bytes": "2210928"
},
{
"name": "CMake",
"bytes": "179409"
},
{
"name": "CSS",
"bytes": "847"
},
{
"name": "Emacs Lisp",
"bytes": "111146"
},
{
"name": "Go",
"bytes": "13884"
},
{
"name": "HTML",
"bytes": "612"
},
{
"name": "Lua",
"bytes": "79974"
},
{
"name": "M4",
"bytes": "257"
},
{
"name": "Makefile",
"bytes": "120923"
},
{
"name": "Objective-C",
"bytes": "50546"
},
{
"name": "Python",
"bytes": "3767934"
},
{
"name": "Ruby",
"bytes": "8015"
},
{
"name": "Shell",
"bytes": "106805"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^event/$', views.EventList.as_view(), name='event'),
url(r'^event/(?P<pk>[0-9]+)/$', views.EventDetail.as_view(),
name='detail'),
url(r'^event/new$', views.EventCreate.as_view(), name='new'),
url(r'^event/edit/(?P<pk>[0-9]+)/$',
views.EventUpdate.as_view(), name='edit'),
url(r'^event/delete/(?P<pk>[0-9]+)/$',
views.EventDelete.as_view(), name='delete'),
]
|
{
"content_hash": "437eecc9a62bfdf63e200eb10c4d57a3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 62,
"avg_line_length": 34.69230769230769,
"alnum_prop": 0.6164079822616408,
"repo_name": "Jhonbeltran/information-layer8",
"id": "a02f2a49c16588916b26191037ec8032352e2f2a",
"size": "451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162378"
},
{
"name": "HTML",
"bytes": "17338"
},
{
"name": "JavaScript",
"bytes": "324493"
},
{
"name": "Python",
"bytes": "26785"
}
],
"symlink_target": ""
}
|
"""Record of phased-in incompatible language changes.
Each line is of the form:
FeatureName = "_Feature(" OptionalRelease "," MandatoryRelease ","
CompilerFlag ")"
where, normally, OptionalRelease < MandatoryRelease, and both are 5-tuples
of the same form as sys.version_info:
(PY_MAJOR_VERSION, # the 2 in 2.1.0a3; an int
PY_MINOR_VERSION, # the 1; an int
PY_MICRO_VERSION, # the 0; an int
PY_RELEASE_LEVEL, # "alpha", "beta", "candidate" or "final"; string
PY_RELEASE_SERIAL # the 3; an int
)
OptionalRelease records the first release in which
from __future__ import FeatureName
was accepted.
In the case of MandatoryReleases that have not yet occurred,
MandatoryRelease predicts the release in which the feature will become part
of the language.
Else MandatoryRelease records when the feature became part of the language;
in releases at or after that, modules no longer need
from __future__ import FeatureName
to use the feature in question, but may continue to use such imports.
MandatoryRelease may also be None, meaning that a planned feature got
dropped.
Instances of class _Feature have two corresponding methods,
.getOptionalRelease() and .getMandatoryRelease().
CompilerFlag is the (bitfield) flag that should be passed in the fourth
argument to the builtin function compile() to enable the feature in
dynamically compiled code. This flag is stored in the .compiler_flag
attribute on _Feature instances. These values must match the appropriate
#defines of CO_xxx flags in Include/compile.h.
No feature line is ever to be deleted from this file.
"""
all_feature_names = [
"nested_scopes",
"generators",
"division",
"absolute_import",
"with_statement",
"print_function",
"unicode_literals",
"barry_as_FLUFL",
"generator_stop",
"annotations",
]
__all__ = ["all_feature_names"] + all_feature_names
# The CO_xxx symbols are defined here under the same names defined in
# code.h and used by compile.h, so that an editor search will find them here.
# However, they're not exported in __all__, because they don't really belong to
# this module.
CO_NESTED = 0x0010 # nested_scopes
CO_GENERATOR_ALLOWED = 0 # generators (obsolete, was 0x1000)
CO_FUTURE_DIVISION = 0x2000 # division
CO_FUTURE_ABSOLUTE_IMPORT = 0x4000 # perform absolute imports by default
CO_FUTURE_WITH_STATEMENT = 0x8000 # with statement
CO_FUTURE_PRINT_FUNCTION = 0x10000 # print function
CO_FUTURE_UNICODE_LITERALS = 0x20000 # unicode string literals
CO_FUTURE_BARRY_AS_BDFL = 0x40000
CO_FUTURE_GENERATOR_STOP = 0x80000 # StopIteration becomes RuntimeError in generators
CO_FUTURE_ANNOTATIONS = 0x100000 # annotations become strings at runtime
class _Feature:
def __init__(self, optionalRelease, mandatoryRelease, compiler_flag):
self.optional = optionalRelease
self.mandatory = mandatoryRelease
self.compiler_flag = compiler_flag
def getOptionalRelease(self):
"""Return first release in which this feature was recognized.
This is a 5-tuple, of the same form as sys.version_info.
"""
return self.optional
def getMandatoryRelease(self):
"""Return release in which this feature will become mandatory.
This is a 5-tuple, of the same form as sys.version_info, or, if
the feature was dropped, is None.
"""
return self.mandatory
def __repr__(self):
return "_Feature" + repr((self.optional,
self.mandatory,
self.compiler_flag))
nested_scopes = _Feature((2, 1, 0, "beta", 1),
(2, 2, 0, "alpha", 0),
CO_NESTED)
generators = _Feature((2, 2, 0, "alpha", 1),
(2, 3, 0, "final", 0),
CO_GENERATOR_ALLOWED)
division = _Feature((2, 2, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_DIVISION)
absolute_import = _Feature((2, 5, 0, "alpha", 1),
(3, 0, 0, "alpha", 0),
CO_FUTURE_ABSOLUTE_IMPORT)
with_statement = _Feature((2, 5, 0, "alpha", 1),
(2, 6, 0, "alpha", 0),
CO_FUTURE_WITH_STATEMENT)
print_function = _Feature((2, 6, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_PRINT_FUNCTION)
unicode_literals = _Feature((2, 6, 0, "alpha", 2),
(3, 0, 0, "alpha", 0),
CO_FUTURE_UNICODE_LITERALS)
barry_as_FLUFL = _Feature((3, 1, 0, "alpha", 2),
(4, 0, 0, "alpha", 0),
CO_FUTURE_BARRY_AS_BDFL)
generator_stop = _Feature((3, 5, 0, "beta", 1),
(3, 7, 0, "alpha", 0),
CO_FUTURE_GENERATOR_STOP)
annotations = _Feature((3, 7, 0, "beta", 1),
(4, 0, 0, "alpha", 0),
CO_FUTURE_ANNOTATIONS)
|
{
"content_hash": "36a51992872a1838cbb6903300fb1128",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 86,
"avg_line_length": 34.93835616438356,
"alnum_prop": 0.606743775730249,
"repo_name": "xyuanmu/XX-Net",
"id": "e1135685d846ce7064559aa4aa4c24839f7145aa",
"size": "5101",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python3.8.2/Lib/__future__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4145"
},
{
"name": "C",
"bytes": "53301"
},
{
"name": "CSS",
"bytes": "94951"
},
{
"name": "HTML",
"bytes": "252022"
},
{
"name": "JavaScript",
"bytes": "22405"
},
{
"name": "Python",
"bytes": "15474534"
},
{
"name": "Shell",
"bytes": "10208"
},
{
"name": "Visual Basic",
"bytes": "1795"
}
],
"symlink_target": ""
}
|
from pastebin.models import Paste
class PasteRemover:
def __init__(self):
        # Imported lazily, presumably to avoid import cycles with pastebin.core.
        from pastebin.core.paste_dates import PasteDates
        import logging
super().__init__()
self.paste_dates = PasteDates()
self.logger = logging.getLogger(__name__)
def removeExpiredPastes(self):
cur_dt = self.paste_dates.now()
filtered = Paste.objects.filter(expiry_date__lte=cur_dt)
        self.logger.info("Found %d objects to be removed", len(filtered))
        if filtered:
            filtered.delete()
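# Hedged usage sketch: this class is typically driven by a periodic job, e.g.
# a custom management command or a cron/celery task (invocation illustrative):
#     PasteRemover().removeExpiredPastes()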
|
{
"content_hash": "83e9e84e2c07553db5e568785df91d81",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 80,
"avg_line_length": 34.75,
"alnum_prop": 0.6276978417266187,
"repo_name": "johannessarpola/django-pastebin",
"id": "fda706645823155f3844e22442f1a26d10aa4daa",
"size": "556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pastebin/core/paste_remover.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "139"
},
{
"name": "HTML",
"bytes": "9950"
},
{
"name": "JavaScript",
"bytes": "3363"
},
{
"name": "Python",
"bytes": "56138"
}
],
"symlink_target": ""
}
|
"""Parse a timespec (https://github.com/fenhl/timespec).
Usage:
timespec [options] [<predicate>...]
timespec -h | --help
timespec --version
Options:
-f, --format=<format> Print the date in the given format. [Default: %Y-%m-%d %H:%M:%S]
-h, --help Print this message and exit.
    -r, --reverse              Search backwards in time instead of forwards.
-s, --start=<datetime> Use this datetime, given in --format, as the start time. Defaults to the current datetime.
-z, --timezone=<timezone> Use this timezone from the Olson timezone database. Defaults to UTC.
--version Print version info and exit.
"""
import sys
sys.path.append('/opt/py')  # site-specific library path used by the author's deployment
import datetime
import docopt
import pytz
import timespec
__version__ = timespec.__version__
if __name__ == '__main__':
arguments = docopt.docopt(__doc__, version='timespec {}'.format(__version__))
if arguments['--timezone']:
tz = pytz.timezone(arguments['--timezone'])
else:
tz = pytz.utc
if arguments['--start']:
start = tz.localize(datetime.datetime.strptime(arguments['--start'], arguments['--format']))
else:
start = datetime.datetime.now(datetime.timezone.utc).astimezone(tz)
result = timespec.parse(arguments['<predicate>'], reverse=arguments['--reverse'], start=start, tz=tz) #TODO read candidates from stdin
print(format(result, arguments['--format']))
|
{
"content_hash": "0c85a5a154016353a5963c562804a732",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 138,
"avg_line_length": 36.69230769230769,
"alnum_prop": 0.6450034940600978,
"repo_name": "fenhl/python-timespec",
"id": "043fc2934eb80fa99f0d277a44c16e299d4fdf4d",
"size": "1455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "timespec/__main__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6163"
}
],
"symlink_target": ""
}
|
from OpenGLCffi.GL import params
@params(api='gl', prms=['pname', 'index', 'val'])
def glSetMultisamplefvAMD(pname, index, val):
pass
|
{
"content_hash": "ad19bd924369d3271aceedbb464d23be",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 49,
"avg_line_length": 22.833333333333332,
"alnum_prop": 0.708029197080292,
"repo_name": "cydenix/OpenGLCffi",
"id": "0e56d509fb2ed96d077ab27ce64cbadf428d7a28",
"size": "137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenGLCffi/GL/EXT/AMD/sample_positions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1646"
},
{
"name": "C++",
"bytes": "188"
},
{
"name": "Python",
"bytes": "1853617"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._managed_private_endpoints_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_factory_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedPrivateEndpointsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.datafactory.aio.DataFactoryManagementClient`'s
:attr:`managed_private_endpoints` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_factory(
self, resource_group_name: str, factory_name: str, managed_virtual_network_name: str, **kwargs: Any
) -> AsyncIterable["_models.ManagedPrivateEndpointResource"]:
"""Lists managed private endpoints.
:param resource_group_name: The resource group name. Required.
:type resource_group_name: str
:param factory_name: The factory name. Required.
:type factory_name: str
:param managed_virtual_network_name: Managed virtual network name. Required.
:type managed_virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ManagedPrivateEndpointResource or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.datafactory.models.ManagedPrivateEndpointResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2018-06-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ManagedPrivateEndpointListResponse]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_factory_request(
resource_group_name=resource_group_name,
factory_name=factory_name,
managed_virtual_network_name=managed_virtual_network_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_factory.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ManagedPrivateEndpointListResponse", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_factory.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/managedVirtualNetworks/{managedVirtualNetworkName}/managedPrivateEndpoints"} # type: ignore
@overload
async def create_or_update(
self,
resource_group_name: str,
factory_name: str,
managed_virtual_network_name: str,
managed_private_endpoint_name: str,
managed_private_endpoint: _models.ManagedPrivateEndpointResource,
if_match: Optional[str] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagedPrivateEndpointResource:
"""Creates or updates a managed private endpoint.
:param resource_group_name: The resource group name. Required.
:type resource_group_name: str
:param factory_name: The factory name. Required.
:type factory_name: str
:param managed_virtual_network_name: Managed virtual network name. Required.
:type managed_virtual_network_name: str
:param managed_private_endpoint_name: Managed private endpoint name. Required.
:type managed_private_endpoint_name: str
:param managed_private_endpoint: Managed private endpoint resource definition. Required.
:type managed_private_endpoint: ~azure.mgmt.datafactory.models.ManagedPrivateEndpointResource
:param if_match: ETag of the managed private endpoint entity. Should only be specified for
update, for which it should match existing entity or can be * for unconditional update. Default
value is None.
:type if_match: str
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedPrivateEndpointResource or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.ManagedPrivateEndpointResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create_or_update(
self,
resource_group_name: str,
factory_name: str,
managed_virtual_network_name: str,
managed_private_endpoint_name: str,
managed_private_endpoint: IO,
if_match: Optional[str] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.ManagedPrivateEndpointResource:
"""Creates or updates a managed private endpoint.
:param resource_group_name: The resource group name. Required.
:type resource_group_name: str
:param factory_name: The factory name. Required.
:type factory_name: str
:param managed_virtual_network_name: Managed virtual network name. Required.
:type managed_virtual_network_name: str
:param managed_private_endpoint_name: Managed private endpoint name. Required.
:type managed_private_endpoint_name: str
:param managed_private_endpoint: Managed private endpoint resource definition. Required.
:type managed_private_endpoint: IO
:param if_match: ETag of the managed private endpoint entity. Should only be specified for
update, for which it should match existing entity or can be * for unconditional update. Default
value is None.
:type if_match: str
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedPrivateEndpointResource or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.ManagedPrivateEndpointResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
factory_name: str,
managed_virtual_network_name: str,
managed_private_endpoint_name: str,
managed_private_endpoint: Union[_models.ManagedPrivateEndpointResource, IO],
if_match: Optional[str] = None,
**kwargs: Any
) -> _models.ManagedPrivateEndpointResource:
"""Creates or updates a managed private endpoint.
:param resource_group_name: The resource group name. Required.
:type resource_group_name: str
:param factory_name: The factory name. Required.
:type factory_name: str
:param managed_virtual_network_name: Managed virtual network name. Required.
:type managed_virtual_network_name: str
:param managed_private_endpoint_name: Managed private endpoint name. Required.
:type managed_private_endpoint_name: str
:param managed_private_endpoint: Managed private endpoint resource definition. Is either a
         model type or an IO type. Required.
:type managed_private_endpoint: ~azure.mgmt.datafactory.models.ManagedPrivateEndpointResource
or IO
:param if_match: ETag of the managed private endpoint entity. Should only be specified for
update, for which it should match existing entity or can be * for unconditional update. Default
value is None.
:type if_match: str
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedPrivateEndpointResource or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.ManagedPrivateEndpointResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2018-06-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ManagedPrivateEndpointResource]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(managed_private_endpoint, (IO, bytes)):
_content = managed_private_endpoint
else:
_json = self._serialize.body(managed_private_endpoint, "ManagedPrivateEndpointResource")
request = build_create_or_update_request(
resource_group_name=resource_group_name,
factory_name=factory_name,
managed_virtual_network_name=managed_virtual_network_name,
managed_private_endpoint_name=managed_private_endpoint_name,
subscription_id=self._config.subscription_id,
if_match=if_match,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create_or_update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ManagedPrivateEndpointResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/managedVirtualNetworks/{managedVirtualNetworkName}/managedPrivateEndpoints/{managedPrivateEndpointName}"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
factory_name: str,
managed_virtual_network_name: str,
managed_private_endpoint_name: str,
if_none_match: Optional[str] = None,
**kwargs: Any
) -> _models.ManagedPrivateEndpointResource:
"""Gets a managed private endpoint.
:param resource_group_name: The resource group name. Required.
:type resource_group_name: str
:param factory_name: The factory name. Required.
:type factory_name: str
:param managed_virtual_network_name: Managed virtual network name. Required.
:type managed_virtual_network_name: str
:param managed_private_endpoint_name: Managed private endpoint name. Required.
:type managed_private_endpoint_name: str
:param if_none_match: ETag of the managed private endpoint entity. Should only be specified for
get. If the ETag matches the existing entity tag, or if * was provided, then no content will be
returned. Default value is None.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ManagedPrivateEndpointResource or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.ManagedPrivateEndpointResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2018-06-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ManagedPrivateEndpointResource]
request = build_get_request(
resource_group_name=resource_group_name,
factory_name=factory_name,
managed_virtual_network_name=managed_virtual_network_name,
managed_private_endpoint_name=managed_private_endpoint_name,
subscription_id=self._config.subscription_id,
if_none_match=if_none_match,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("ManagedPrivateEndpointResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/managedVirtualNetworks/{managedVirtualNetworkName}/managedPrivateEndpoints/{managedPrivateEndpointName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
factory_name: str,
managed_virtual_network_name: str,
managed_private_endpoint_name: str,
**kwargs: Any
) -> None:
"""Deletes a managed private endpoint.
:param resource_group_name: The resource group name. Required.
:type resource_group_name: str
:param factory_name: The factory name. Required.
:type factory_name: str
:param managed_virtual_network_name: Managed virtual network name. Required.
:type managed_virtual_network_name: str
:param managed_private_endpoint_name: Managed private endpoint name. Required.
:type managed_private_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2018-06-01"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
factory_name=factory_name,
managed_virtual_network_name=managed_virtual_network_name,
managed_private_endpoint_name=managed_private_endpoint_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/managedVirtualNetworks/{managedVirtualNetworkName}/managedPrivateEndpoints/{managedPrivateEndpointName}"} # type: ignore
|
{
"content_hash": "54303e1054b5eb2664a99aae9350be80",
"timestamp": "",
"source": "github",
"line_count": 473,
"max_line_length": 285,
"avg_line_length": 46.35095137420719,
"alnum_prop": 0.6557653712826127,
"repo_name": "Azure/azure-sdk-for-python",
"id": "2239718402b3692a38c3fbfa64e8773998ccc57a",
"size": "22424",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/datafactory/azure-mgmt-datafactory/azure/mgmt/datafactory/aio/operations/_managed_private_endpoints_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from senlin.tests.functional import api as test_api
from senlin.tests.functional import base
from senlin.tests.functional.utils import test_utils
class TestReceiver(base.SenlinFunctionalTest):
def setUp(self):
super(TestReceiver, self).setUp()
# Create profile
self.profile = test_api.create_profile(
self.client, test_utils.random_name('profile'),
test_utils.spec_nova_server)
def tearDown(self):
# Delete profile
test_api.delete_profile(self.client, self.profile['id'])
super(TestReceiver, self).tearDown()
class TestWebhook(TestReceiver):
def test_webhook(self):
# Create cluster
desired_capacity = 2
min_size = 1
max_size = 5
cluster = test_api.create_cluster(self.client,
test_utils.random_name('cluster'),
self.profile['id'], desired_capacity,
min_size, max_size)
cluster = test_utils.wait_for_status(test_api.get_cluster, self.client,
cluster['id'], 'ACTIVE')
        # Create a webhook receiver targeting the cluster resize action,
        # with params
params = {
'adjustment_type': 'EXACT_CAPACITY',
'number': 2,
}
webhook_name = test_utils.random_name('webhook')
res = test_api.create_receiver(self.client, webhook_name,
cluster['id'], 'CLUSTER_RESIZE',
'webhook', params=params)
r_webhook = test_api.get_receiver(self.client, res['id'])
# Verify webhook params
self.assertEqual(webhook_name, r_webhook['name'])
self.assertEqual(cluster['id'], r_webhook['cluster_id'])
self.assertEqual('CLUSTER_RESIZE', r_webhook['action'])
self.assertEqual(params, r_webhook['params'])
webhook_url = r_webhook['channel']['alarm_url']
# Trigger webhook and wait for action complete
action_id = test_api.trigger_webhook(webhook_url)
test_utils.wait_for_status(test_api.get_action, self.client,
action_id, 'SUCCEEDED')
# Verify action is as expected
action = test_api.get_action(self.client, action_id)
self.assertEqual('CLUSTER_RESIZE', action['action'])
self.assertEqual(cluster['id'], action['target'])
self.assertEqual(params, action['inputs'])
# Verify cluster resize result
cluster = test_api.get_cluster(self.client, cluster['id'])
self.assertEqual('ACTIVE', cluster['status'])
self.assertEqual(2, len(cluster['nodes']))
# Delete webhook receiver
test_api.delete_receiver(self.client, r_webhook['id'])
# Delete cluster
test_api.delete_cluster(self.client, cluster['id'])
test_utils.wait_for_status(test_api.get_cluster, self.client,
cluster['id'], 'DELETED',
ignore_missing=True)
|
{
"content_hash": "5c6db24baeea906b6ff60f53651bb0dd",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 41.626666666666665,
"alnum_prop": 0.572709801409353,
"repo_name": "tengqm/senlin-container",
"id": "a43e5a97d40e1b111cd3d1c0079256f37cd05c54",
"size": "3668",
"binary": false,
"copies": "1",
"ref": "refs/heads/container_cluster_support",
"path": "senlin/tests/functional/test_receiver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2239281"
},
{
"name": "Shell",
"bytes": "18730"
}
],
"symlink_target": ""
}
|
"""
Support for SQLAlchemy. Provides SQLAlchemyTarget for storing in databases
supported by SQLAlchemy. The user would be responsible for installing the
required database driver to connect using SQLAlchemy.
Minimal example of a job to copy data to database using SQLAlchemy is as shown
below:
.. code-block:: python
from sqlalchemy import String
import luigi
from luigi.contrib import sqla
class SQLATask(sqla.CopyToTable):
# columns defines the table schema, with each element corresponding
# to a column in the format (args, kwargs) which will be sent to
# the sqlalchemy.Column(*args, **kwargs)
columns = [
(["item", String(64)], {"primary_key": True}),
(["property", String(64)], {})
]
connection_string = "sqlite://" # in memory SQLite database
table = "item_property" # name of the table to store data
def rows(self):
for row in [("item1", "property1"), ("item2", "property2")]:
yield row
if __name__ == '__main__':
task = SQLATask()
luigi.build([task], local_scheduler=True)
If the target table where the data needs to be copied already exists, then
the column schema definition can be skipped and instead the reflect flag
can be set as True. Here is a modified version of the above example:
.. code-block:: python
from sqlalchemy import String
import luigi
from luigi.contrib import sqla
class SQLATask(sqla.CopyToTable):
# If database table is already created, then the schema can be loaded
# by setting the reflect flag to True
reflect = True
connection_string = "sqlite://" # in memory SQLite database
table = "item_property" # name of the table to store data
def rows(self):
for row in [("item1", "property1"), ("item2", "property2")]:
yield row
if __name__ == '__main__':
task = SQLATask()
luigi.build([task], local_scheduler=True)
In the above examples, the data that needs to be copied was directly provided by
overriding the rows method. Alternately, if the data comes from another task, the
modified example would look as shown below:
.. code-block:: python
from sqlalchemy import String
import luigi
from luigi.contrib import sqla
from luigi.mock import MockTarget
class BaseTask(luigi.Task):
def output(self):
return MockTarget("BaseTask")
def run(self):
out = self.output().open("w")
TASK_LIST = ["item%d\\tproperty%d\\n" % (i, i) for i in range(10)]
for task in TASK_LIST:
out.write(task)
out.close()
class SQLATask(sqla.CopyToTable):
# columns defines the table schema, with each element corresponding
# to a column in the format (args, kwargs) which will be sent to
# the sqlalchemy.Column(*args, **kwargs)
columns = [
(["item", String(64)], {"primary_key": True}),
(["property", String(64)], {})
]
connection_string = "sqlite://" # in memory SQLite database
table = "item_property" # name of the table to store data
def requires(self):
return BaseTask()
if __name__ == '__main__':
task1, task2 = SQLATask(), BaseTask()
luigi.build([task1, task2], local_scheduler=True)
In the above example, the output from `BaseTask` is copied into the
database. Here we did not have to implement the `rows` method because
the default `rows` implementation assumes every line is a row with
column values separated by a tab. One can define `column_separator`
option for the task if the values are say comma separated instead of
tab separated.
You can pass in database specific connection arguments by setting the connect_args
dictionary. The options will be passed directly to the DBAPI's connect method as
keyword arguments.
The other option to `sqla.CopyToTable` that can help with performance is the
`chunk_size`. The default is 5000. This is the number of rows that will be inserted in
a transaction at a time. Depending on the size of the inserts, this value can be tuned
for performance.
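A hedged sketch combining both knobs (the `connect_args` contents are
illustrative and depend on your DBAPI driver):
.. code-block:: python
    class TunedSQLATask(sqla.CopyToTable):
        reflect = True  # target table assumed to already exist
        connection_string = "sqlite:///data.db"
        table = "item_property"
        connect_args = {"timeout": 30}  # forwarded to the DBAPI connect()
        chunk_size = 1000  # rows inserted per transaction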
See here for a `tutorial on building task pipelines using luigi
<http://gouthamanbalaraman.com/blog/building-luigi-task-pipeline.html>`_ and
using `SQLAlchemy in workflow pipelines <http://gouthamanbalaraman.com/blog/sqlalchemy-luigi-workflow-pipeline.html>`_.
Author: Gouthaman Balaraman
Date: 01/02/2015
"""
import abc
import collections
import datetime
import itertools
import logging
import luigi
import os
import sqlalchemy
class SQLAlchemyTarget(luigi.Target):
"""
Database target using SQLAlchemy.
This will rarely have to be directly instantiated by the user.
Typical usage would be to override `luigi.contrib.sqla.CopyToTable` class
to create a task to write to the database.
"""
marker_table = None
_engine_dict = {} # dict of sqlalchemy engine instances
Connection = collections.namedtuple("Connection", "engine pid")
def __init__(self, connection_string, target_table, update_id, echo=False, connect_args=None):
"""
Constructor for the SQLAlchemyTarget.
:param connection_string: SQLAlchemy connection string
:type connection_string: str
:param target_table: The table name for the data
:type target_table: str
:param update_id: An identifier for this data set
:type update_id: str
:param echo: Flag to setup SQLAlchemy logging
:type echo: bool
:param connect_args: A dictionary of connection arguments
:type connect_args: dict
:return:
"""
if connect_args is None:
connect_args = {}
self.target_table = target_table
self.update_id = update_id
self.connection_string = connection_string
self.echo = echo
self.connect_args = connect_args
self.marker_table_bound = None
@property
def engine(self):
"""
Return an engine instance, creating it if it doesn't exist.
Recreate the engine connection if it wasn't originally created
by the current process.
"""
pid = os.getpid()
conn = SQLAlchemyTarget._engine_dict.get(self.connection_string)
if not conn or conn.pid != pid:
# create and reset connection
engine = sqlalchemy.create_engine(
self.connection_string,
connect_args=self.connect_args,
echo=self.echo
)
SQLAlchemyTarget._engine_dict[self.connection_string] = self.Connection(engine, pid)
return SQLAlchemyTarget._engine_dict[self.connection_string].engine
def touch(self):
"""
Mark this update as complete.
"""
if self.marker_table_bound is None:
self.create_marker_table()
table = self.marker_table_bound
id_exists = self.exists()
with self.engine.begin() as conn:
if not id_exists:
ins = table.insert().values(update_id=self.update_id, target_table=self.target_table,
inserted=datetime.datetime.now())
else:
ins = table.update().where(sqlalchemy.and_(table.c.update_id == self.update_id,
table.c.target_table == self.target_table)).\
values(update_id=self.update_id, target_table=self.target_table,
inserted=datetime.datetime.now())
conn.execute(ins)
assert self.exists()
def exists(self):
row = None
if self.marker_table_bound is None:
self.create_marker_table()
with self.engine.begin() as conn:
table = self.marker_table_bound
s = sqlalchemy.select([table]).where(sqlalchemy.and_(table.c.update_id == self.update_id,
table.c.target_table == self.target_table)).limit(1)
row = conn.execute(s).fetchone()
return row is not None
def create_marker_table(self):
"""
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
"""
if self.marker_table is None:
self.marker_table = luigi.configuration.get_config().get('sqlalchemy', 'marker-table', 'table_updates')
engine = self.engine
with engine.begin() as con:
metadata = sqlalchemy.MetaData()
if not con.dialect.has_table(con, self.marker_table):
self.marker_table_bound = sqlalchemy.Table(
self.marker_table, metadata,
sqlalchemy.Column("update_id", sqlalchemy.String(128), primary_key=True),
sqlalchemy.Column("target_table", sqlalchemy.String(128)),
sqlalchemy.Column("inserted", sqlalchemy.DateTime, default=datetime.datetime.now()))
metadata.create_all(engine)
else:
metadata.reflect(only=[self.marker_table], bind=engine)
self.marker_table_bound = metadata.tables[self.marker_table]
def open(self, mode):
raise NotImplementedError("Cannot open() SQLAlchemyTarget")
class CopyToTable(luigi.Task):
"""
An abstract task for inserting a data set into SQLAlchemy RDBMS
Usage:
* subclass and override the required `connection_string`, `table` and `columns` attributes.
* optionally override the `schema` attribute to use a different schema for
the target table.
"""
_logger = logging.getLogger('luigi-interface')
echo = False
connect_args = {}
@property
@abc.abstractmethod
def connection_string(self):
return None
@property
@abc.abstractmethod
def table(self):
return None
# specify the columns that define the schema. The format for the columns is a list
# of tuples. For example :
# columns = [
# (["id", sqlalchemy.Integer], dict(primary_key=True)),
# (["name", sqlalchemy.String(64)], {}),
# (["value", sqlalchemy.String(64)], {})
# ]
# The tuple (args_list, kwargs_dict) here is the args and kwargs
# that need to be passed to sqlalchemy.Column(*args, **kwargs).
# If the tables have already been setup by another process, then you can
# completely ignore the columns. Instead set the reflect value to True below
columns = []
# Specify the database schema of the target table, if supported by the
# RDBMS. Note that this doesn't change the schema of the marker table.
    # The schema MUST already exist in the database, or this task will fail.
schema = ''
# options
    column_separator = "\t"  # how columns are separated in the file copied into the database
chunk_size = 5000 # default chunk size for insert
reflect = False # Set this to true only if the table has already been created by alternate means
def create_table(self, engine):
"""
Override to provide code for creating the target table.
By default it will be created using types specified in columns.
If the table exists, then it binds to the existing table.
If overridden, use the provided connection object for setting up the table in order to
create the table and insert data using the same transaction.
:param engine: The sqlalchemy engine instance
:type engine: object
"""
def construct_sqla_columns(columns):
retval = [sqlalchemy.Column(*c[0], **c[1]) for c in columns]
return retval
needs_setup = (len(self.columns) == 0) or (False in [len(c) == 2 for c in self.columns]) if not self.reflect else False
if needs_setup:
# only names of columns specified, no types
raise NotImplementedError("create_table() not implemented for %r and columns types not specified" % self.table)
else:
# if columns is specified as (name, type) tuples
with engine.begin() as con:
if self.schema:
metadata = sqlalchemy.MetaData(schema=self.schema)
else:
metadata = sqlalchemy.MetaData()
try:
if not con.dialect.has_table(con, self.table, self.schema or None):
sqla_columns = construct_sqla_columns(self.columns)
self.table_bound = sqlalchemy.Table(self.table, metadata, *sqla_columns)
metadata.create_all(engine)
else:
full_table = '.'.join([self.schema, self.table]) if self.schema else self.table
metadata.reflect(only=[self.table], bind=engine)
self.table_bound = metadata.tables[full_table]
except Exception as e:
self._logger.exception(self.table + str(e))
def update_id(self):
"""
This update id will be a unique identifier for this insert on this table.
"""
return self.task_id
def output(self):
return SQLAlchemyTarget(
connection_string=self.connection_string,
target_table=self.table,
update_id=self.update_id(),
connect_args=self.connect_args,
echo=self.echo)
def rows(self):
"""
Return/yield tuples or lists corresponding to each row to be inserted.
This method can be overridden for custom file types or formats.
"""
with self.input().open('r') as fobj:
for line in fobj:
yield line.strip("\n").split(self.column_separator)
def run(self):
self._logger.info("Running task copy to table for update id %s for table %s" % (self.update_id(), self.table))
output = self.output()
engine = output.engine
self.create_table(engine)
with engine.begin() as conn:
rows = iter(self.rows())
ins_rows = [dict(zip(("_" + c.key for c in self.table_bound.c), row))
for row in itertools.islice(rows, self.chunk_size)]
while ins_rows:
self.copy(conn, ins_rows, self.table_bound)
ins_rows = [dict(zip(("_" + c.key for c in self.table_bound.c), row))
for row in itertools.islice(rows, self.chunk_size)]
self._logger.info("Finished inserting %d rows into SQLAlchemy target" % len(ins_rows))
output.touch()
self._logger.info("Finished inserting rows into SQLAlchemy target")
def copy(self, conn, ins_rows, table_bound):
"""
This method does the actual insertion of the rows of data given by ins_rows into the
database. A task that needs row updates instead of insertions should overload this method.
:param conn: The sqlalchemy connection object
:param ins_rows: The dictionary of rows with the keys in the format _<column_name>. For example
if you have a table with a column name "property", then the key in the dictionary
would be "_property". This format is consistent with the bindparam usage in sqlalchemy.
:param table_bound: The object referring to the table
:return:
"""
bound_cols = dict((c, sqlalchemy.bindparam("_" + c.key)) for c in table_bound.columns)
ins = table_bound.insert().values(bound_cols)
conn.execute(ins, ins_rows)
|
{
"content_hash": "c6064438bca19ada618823047a70a34f",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 127,
"avg_line_length": 39.50877192982456,
"alnum_prop": 0.6198934280639432,
"repo_name": "dlstadther/luigi",
"id": "ad8a352df148e168c860d21dd1b82e590cb8803d",
"size": "16375",
"binary": false,
"copies": "3",
"ref": "refs/heads/upstream-master",
"path": "luigi/contrib/sqla.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5575"
},
{
"name": "HTML",
"bytes": "43591"
},
{
"name": "JavaScript",
"bytes": "178078"
},
{
"name": "Python",
"bytes": "2150576"
},
{
"name": "Shell",
"bytes": "2973"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils import timezone
from django.utils.http import urlquote
from django.utils.translation import ugettext_lazy as _
from django.core.mail import send_mail
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
# ==========================================================================================
# ==========================================================================================
#
# ALTERNATIVE USER AUTH RECORD (bigger fields, username = facebook_id, email)
#
# ==========================================================================================
def store_user_email(backend, user, response, *args, **kwargs):
if backend.name == 'facebook':
user.username = response.get('id')
if response.get('email'):
user.email = response.get('email')
user.save()
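# Hedged usage sketch: `store_user_email` has the signature of a
# python-social-auth pipeline step; wiring it in would look roughly like this
# (setting name and dotted path are assumptions):
#     SOCIAL_AUTH_PIPELINE = (
#         # ... default pipeline entries ...
#         'comments.models.store_user_email',
#     )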
class UserManager(BaseUserManager):
def _create_user(self, email, password,
is_staff, is_superuser, **extra_fields):
"""
Creates and saves a User with the given email and password.
"""
now = timezone.now()
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(email=email,
is_staff=is_staff, is_active=True,
is_superuser=is_superuser, last_login=now,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
return self._create_user(email, password, False, False,
**extra_fields)
def create_superuser(self, email, password, **extra_fields):
return self._create_user(email, password, True, True,
**extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(_('email address'), max_length=254, unique=True)
username = models.CharField(_('username/facebook_id'), max_length=254, blank=True)
first_name = models.CharField(_('first name'), max_length=254, blank=True)
last_name = models.CharField(_('last name'), max_length=254, blank=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
level_data = models.TextField()
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
def get_absolute_url(self):
return "/users/%s/" % urlquote(self.email)
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def get_userpic(self):
"Returns the url for user picture."
return '//graph.facebook.com/%s/picture' % self.username
def email_user(self, subject, message, from_email=None):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email])
# ==========================================================================================
# ==========================================================================================
#
# COMMENTS MODELS
#
# ==========================================================================================
class Comment(models.Model):
user = models.ForeignKey(User)
tag = models.CharField(max_length=254, blank=True)
text = models.TextField()
active = models.IntegerField(default=1)
dt_created = models.DateTimeField(auto_now_add=True)
class Reply(models.Model):
comment = models.ForeignKey(Comment)
user = models.ForeignKey(User)
text = models.TextField()
active = models.IntegerField(default=1)
dt_created = models.DateTimeField(auto_now_add=True)
|
{
"content_hash": "d58fd6a010e81b689c8fee47d4acb89c",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 92,
"avg_line_length": 37.46341463414634,
"alnum_prop": 0.5553385416666666,
"repo_name": "makiwara/onemoretime",
"id": "97dd83d715a6e3959c0fbc91ae002865ede05503",
"size": "4633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "comments/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "17"
},
{
"name": "CSS",
"bytes": "6925"
},
{
"name": "HTML",
"bytes": "6284"
},
{
"name": "JavaScript",
"bytes": "30163"
},
{
"name": "Python",
"bytes": "21501"
},
{
"name": "Shell",
"bytes": "38"
}
],
"symlink_target": ""
}
|
"""Tests for TempJunction."""
import os
import unittest
# Do not edit this line. Copybara replaces it with PY2 migration helper.
from src.test.py.bazel import test_base
from tools.android import junction
class JunctionTest(test_base.TestBase):
"""Unit tests for junction.py."""
def _AssertCreateJunctionWhenTargetsParentsDontExist(self, max_path=None):
def tempdir():
return self.ScratchDir("junc temp")
target = self.Path("this directory/should not\\yet exist")
self.assertFalse(os.path.exists(os.path.dirname(os.path.dirname(target))))
    # The `target` path above is a non-normalized Windows path with a space in
    # it which doesn't even exist. TempJunction should still work; it should:
    # - normalize the path, and
    # - create all directories on the path
juncpath = None
with junction.TempJunction(
target, testonly_mkdtemp=tempdir, testonly_maxpath=max_path) as j:
juncpath = j
# Ensure that `j` created the junction.
self.assertTrue(os.path.exists(target))
self.assertTrue(os.path.exists(juncpath))
self.assertTrue(
juncpath.endswith(os.path.join("junc temp", "j")))
self.assertTrue(os.path.isabs(juncpath))
# Create a file under the junction.
filepath = os.path.join(juncpath, "some file.txt")
with open(filepath, "w") as f:
f.write("hello")
# Ensure we can reach the file via the junction and the target directory.
self.assertTrue(os.path.exists(os.path.join(target, "some file.txt")))
self.assertTrue(os.path.exists(os.path.join(juncpath, "some file.txt")))
# Ensure that after the `with` block the junction and temp directories no
# longer exist, but we can still reach the file via the target directory.
self.assertTrue(os.path.exists(os.path.join(target, "some file.txt")))
self.assertFalse(os.path.exists(os.path.join(juncpath, "some file.txt")))
self.assertFalse(os.path.exists(juncpath))
self.assertFalse(os.path.exists(os.path.dirname(juncpath)))
def testCreateJunctionWhenTargetsParentsDontExistAndPathIsShort(self):
self._AssertCreateJunctionWhenTargetsParentsDontExist()
def testCreateJunctionWhenTargetsParentsDontExistAndPathIsLong(self):
self._AssertCreateJunctionWhenTargetsParentsDontExist(1)
def testCannotCreateJunction(self):
def tempdir():
return self.ScratchDir("junc temp")
target = self.ScratchDir("junc target")
# Make the `target` path a non-normalized Windows path with a space in it.
# TempJunction should still work.
target = os.path.dirname(target) + "/junc target"
with junction.TempJunction(target, testonly_mkdtemp=tempdir) as j:
self.assertTrue(os.path.exists(j))
if os.name != "nt":
# Ensure that TempJunction raises a JunctionCreationError if it cannot
# create a junction. In this case the junction already exists in that
# directory.
# On Windows, we error out in a different way, so we skip the test and
# rely on this particular feature being correct on the other platforms
# as sufficient.
self.assertRaises(
junction.JunctionCreationError,
junction.TempJunction,
target,
testonly_mkdtemp=tempdir)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "96187d7d8c5e0f6972db04d56f8e632d",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 40.795180722891565,
"alnum_prop": 0.6978735971647962,
"repo_name": "cushon/bazel",
"id": "da60478499429bb194b7a460356bd47bc16c740f",
"size": "3988",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/android/junction_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2997"
},
{
"name": "C",
"bytes": "32332"
},
{
"name": "C++",
"bytes": "1704778"
},
{
"name": "HTML",
"bytes": "24957"
},
{
"name": "Java",
"bytes": "42185566"
},
{
"name": "Makefile",
"bytes": "248"
},
{
"name": "Objective-C",
"bytes": "10817"
},
{
"name": "Objective-C++",
"bytes": "1043"
},
{
"name": "PowerShell",
"bytes": "15431"
},
{
"name": "Python",
"bytes": "3594392"
},
{
"name": "Shell",
"bytes": "2718003"
},
{
"name": "Smarty",
"bytes": "30462"
},
{
"name": "Starlark",
"bytes": "26973"
}
],
"symlink_target": ""
}
|
r"""Entry point for Reincarnating RL experiments.
# pylint: disable=line-too-long
"""
import functools
import json
import os
from absl import app
from absl import flags
from absl import logging
from dopamine.discrete_domains import run_experiment as base_run_experiment
from dopamine.discrete_domains import train as base_train
from jax.config import config
import numpy as np
from reincarnating_rl import dqfd_dqn_agent
from reincarnating_rl import jsrl_dqn_agent
from reincarnating_rl import pretrained_dqn_agent
from reincarnating_rl import qdagger_dqn_agent
from reincarnating_rl import qdagger_rainbow_agent
from reincarnating_rl import reloaded_dqn_agent
from reincarnating_rl import run_experiment
from reincarnating_rl import teacher_dqn_agent
from reincarnating_rl import teacher_rainbow_agent
import tensorflow as tf
FLAGS = flags.FLAGS
AGENTS = [
'qdagger_dqn',
'reloaded_dqn',
'pretrained_dqn',
'jsrl_dqn',
'qdagger_rainbow',
'dqfd_dqn',
]
TEACHER_AGENTS = ['dqn']
PRETRAINING_AGENTS = [
'pretrained_dqn',
'qdagger_dqn',
'qdagger_rainbow',
'dqfd_dqn',
]
# flags are defined when importing run_xm_preprocessing
flags.DEFINE_enum('agent', 'qdagger_dqn', AGENTS, 'Name of the agent.')
flags.DEFINE_boolean('disable_jit', False, 'Whether to use jit or not.')
flags.DEFINE_enum('teacher_agent', 'dqn', TEACHER_AGENTS, 'Teacher agent name.')
flags.DEFINE_integer('run_number', 1, 'Run number.')
flags.DEFINE_string(
'teacher_checkpoint_dir', None,
'Directory from which to load the teacher agent checkpoints.')
flags.DEFINE_integer(
'teacher_checkpoint_number', None, 'Checkpoint number of the teacher agent '
'that needs to be loaded.')
flags.DEFINE_string(
'teacher_checkpoint_file_prefix', 'ckpt', 'Checkpoint prefix')
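# Hedged example invocation (flag names as defined above; the module entry
# point and paths are illustrative assumptions):
#     python -m reincarnating_rl.train --base_dir=/tmp/rrl \
#         --agent=qdagger_dqn --run_number=1 \
#         --teacher_checkpoint_dir=/path/to/teacher \
#         --teacher_checkpoint_number=199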
def create_agent(
sess, # pylint: disable=unused-argument
environment,
seed,
agent='rainbow',
summary_writer=None):
"""Create persistent agent which pretrains using a teacher agent."""
if agent == 'qdagger_dqn':
agent_fn = qdagger_dqn_agent.QDaggerDQNAgent
elif agent == 'qdagger_rainbow':
# Pass a separate gin config for DrQ/Full Rainbow agent.
agent_fn = qdagger_rainbow_agent.QDaggerRainbowAgent
elif agent == 'reloaded_dqn':
agent_fn = reloaded_dqn_agent.ReloadedDQNAgent
elif agent == 'pretrained_dqn':
agent_fn = pretrained_dqn_agent.PretrainedDQNAgent
elif agent == 'jsrl_dqn':
agent_fn = jsrl_dqn_agent.JSRLAgent
elif agent == 'dqfd_dqn':
agent_fn = dqfd_dqn_agent.DQfDAgent
else:
raise ValueError(f'{agent} is not defined.')
return agent_fn(
num_actions=environment.action_space.n,
seed=seed,
summary_writer=summary_writer)
def create_teacher_agent(environment,
teacher_agent='dqn',
summary_writer=None):
"""Helper function for creating teacher agent."""
if teacher_agent == 'dqn':
return teacher_dqn_agent.TeacherDQNAgent(
num_actions=environment.action_space.n, summary_writer=summary_writer)
elif teacher_agent == 'rainbow':
return teacher_rainbow_agent.TeacherRainbowAgent(
num_actions=environment.action_space.n, summary_writer=summary_writer)
else:
raise ValueError(f'{teacher_agent} is not a defined agent.')
def set_random_seed(seed):
"""Set random seed for reproducibility."""
logging.info('Setting random seed: %d', seed)
os.environ['PYTHONHASHSEED'] = str(seed)
tf.random.set_seed(seed)
np.random.seed(seed)
def main(unused_argv):
"""Main method.
Args:
unused_argv: Arguments (unused).
"""
logging.set_verbosity(logging.INFO)
if FLAGS.disable_jit:
config.update('jax_disable_jit', True)
base_dir = FLAGS.base_dir
gin_files = FLAGS.gin_files
gin_bindings = FLAGS.gin_bindings
if FLAGS.teacher_checkpoint_dir is not None:
teacher_checkpoint_dir = os.path.join(FLAGS.teacher_checkpoint_dir,
'checkpoints')
else:
teacher_checkpoint_dir = None
  # Set the random seed using the run_number.
set_random_seed(FLAGS.run_number)
base_run_experiment.load_gin_configs(gin_files, gin_bindings)
# Set the Jax agent seed using the run number
create_agent_fn = functools.partial(
create_agent, agent=FLAGS.agent, seed=FLAGS.run_number)
create_teacher_agent_fn = functools.partial(
create_teacher_agent, teacher_agent=FLAGS.teacher_agent)
if FLAGS.agent in PRETRAINING_AGENTS:
runner_fn = run_experiment.ReincarnationRunner
else:
runner_fn = run_experiment.RunnerWithTeacher
runner = runner_fn(
base_dir,
create_agent_fn,
create_teacher_agent_fn=create_teacher_agent_fn,
teacher_checkpoint_dir=teacher_checkpoint_dir,
teacher_checkpoint_file_prefix=FLAGS.teacher_checkpoint_file_prefix,
teacher_checkpoint_number=FLAGS.teacher_checkpoint_number)
runner.run_experiment()
if __name__ == '__main__':
flags.mark_flag_as_required('base_dir')
app.run(main)
|
{
"content_hash": "9c4709843ba3975fa2e848a5a1a23edb",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 80,
"avg_line_length": 31.572327044025158,
"alnum_prop": 0.7113545816733068,
"repo_name": "google-research/reincarnating_rl",
"id": "613dbbd33834561f1a07a2d5a29b648be7dd4ca4",
"size": "5630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reincarnating_rl/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "130127"
}
],
"symlink_target": ""
}
|
'''
Created on Sep 26, 2013
@author: joshua
'''
from django.utils.cache import patch_response_headers
class CacheHeadersMiddleware(object):
""" overrides all Cache-Control settings on responses and injects headers from django.utils.cach.patch_response_headers """
def process_response(self, request, response):
if request.method in ['GET', 'HEAD']:
# dump current Cache-Control headers
del response['Cache-Control']
# inject headers
patch_response_headers(response)
return response
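# Hedged usage sketch: activate by listing the class in the old-style
# middleware setting (the dotted path below is an assumption based on this
# app's layout):
#     MIDDLEWARE_CLASSES += (
#         'apps.utils.cache_headers_middleware.CacheHeadersMiddleware',
#     )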
|
{
"content_hash": "7af275515ccf73c737aeadd445b2ed8a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 127,
"avg_line_length": 29.36842105263158,
"alnum_prop": 0.6756272401433692,
"repo_name": "blitzagency/django-chatterbox",
"id": "f68ccbf5e56b6d5024c76138104c0cc50552f5af",
"size": "558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/devbot/project/apps/utils/cache_headers_middleware.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1745"
},
{
"name": "HTML",
"bytes": "10759"
},
{
"name": "JavaScript",
"bytes": "38264"
},
{
"name": "Makefile",
"bytes": "1540"
},
{
"name": "Python",
"bytes": "209440"
},
{
"name": "Ruby",
"bytes": "3342"
},
{
"name": "SaltStack",
"bytes": "2743"
},
{
"name": "Scheme",
"bytes": "615"
}
],
"symlink_target": ""
}
|
from fabric.api import *
from cuisine import *
def stop():
with settings(warn_only=True):
sudo("service keepalived stop")
def start():
stop()
sudo("service keepalived start")
def uninstall():
"""Uninstall keepalived packages"""
package_clean('keepalived')
def install():
package_ensure('keepalived')
def configure():
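    # Allow processes to bind to the virtual IP even while this node does not
    # currently hold it, which the keepalived VIP failover below relies on.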
sudo('echo "net.ipv4.ip_nonlocal_bind=1" >> /etc/sysctl.conf')
sudo('sysctl -p')
keepalived_conf = text_strip_margin('''
|# Configuration File for keepalived
|global_defs {
|# each load balancer should have a different ID
|# this will be used in SMTP alerts, so you should make
|# each router easily identifiable
|lvs_id LB1
|}
|
|#health-check for keepalive
|vrrp_script chk_nginx { # Requires keepalived-1.1.13
| #script "killall -0 nginx" # cheaper than pidof
    | script "pidof nginx"
| interval 2 # check every 2 seconds
| weight 2 # add 2 points of prio if OK
|}
|
|vrrp_instance VI_1 {
| state MASTER
| interface eth0
|
| # each virtual router id must be unique per instance name#
| virtual_router_id 51
| # MASTER and BACKUP state are determined by the priority
| # even if you specify MASTER as the state, the state will
| # be voted on by priority (so if your state is MASTER but
| # your priority is lower than the router with BACKUP, you
| # will lose the MASTER state)
| priority 101
|
| #check if we are still running
| track_script {
| chk_nginx
| }
|
| # these are the IP addresses that keepalived will setup on
| # this without this block, keepalived will not setup and
| # takedown the IP addresses
| virtual_ipaddress {
| 172.22.23.200
| }
|}
|'''
)
sudo('''echo '%s' > /etc/keepalived/keepalived.conf''' % keepalived_conf)
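# Illustrative invocation, assuming this module is loaded as a fabric fabfile
# (the host name is a placeholder):
#
#   fab -f keepalived.py -H lb1.example.com install configure start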
|
{
"content_hash": "76e5020300d44a250bba2ebd417be127",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 77,
"avg_line_length": 25.063291139240505,
"alnum_prop": 0.6065656565656565,
"repo_name": "StackOps/fabuloso-catalog-havana",
"id": "b976e8510a084acea3d299f7504e28dfc8e8d74f",
"size": "2621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keepalived/keepalived.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "334225"
}
],
"symlink_target": ""
}
|
"""
Holds the formencode validators for debexpo.
"""
__author__ = 'Jonny Lamb'
__copyright__ = 'Copyright © 2008 Jonny Lamb'
__license__ = 'MIT'
import formencode
import logging
import tempfile
from debexpo.lib.base import *
from debexpo.lib.utils import get_gnupg
from debexpo.model import meta
from debexpo.model.users import User
from debexpo.lib import constants
import debexpo.lib.utils
log = logging.getLogger(__name__)
class GpgKey(formencode.validators.FieldStorageUploadConverter):
"""
    Validator for an uploaded GPG key. They must begin with the
    'BEGIN PGP PUBLIC KEY BLOCK' text.
"""
def __init__(self):
self.gpg_id = None
self.gnupg = get_gnupg()
def _to_python(self, value, c):
"""
Validate the GPG key.
``value``
FieldStorage uploaded file.
``c``
"""
key_block_raw = value.value
key_block_parsed = self.gnupg.parse_key_block(key_block_raw)
if self.gnupg.is_unusable:
log.error('Unable to validate GPG key because gpg is unusable.')
            raise formencode.Invalid(_('Internal error: debexpo is not ' +
                                       'properly configured to handle ' +
                                       'GPG keys'), value, c)
if key_block_parsed is None:
log.error('Given data is not a valid GPG key')
raise formencode.Invalid(_('Invalid GPG key'), value, c)
(self.gpg_id, user_ids) = key_block_parsed
if self.gpg_id is None:
log.error("Failed to parse GPG key")
raise formencode.Invalid(_('Invalid GPG key'), value, c)
"""
allow only keys which match the user name
"""
user = meta.session.query(User).get(session['user_id'])
for uid in user_ids:
if user.email == uid.email:
break
else:
log.debug("No user id in key %s does match the email address the user configured" % self.gpg_id.id)
raise formencode.Invalid(_('None of your user IDs in key %s does match your profile mail address' % (self.gpg_id.id)), value, c)
"""
Minimum Key Strength Check.
"""
requiredkeystrength = int(config['debexpo.gpg_minkeystrength'])
keystrength = key_block_parsed.key.strength
if keystrength < requiredkeystrength:
log.debug("Key strength unacceptable in Debian Keyring")
raise formencode.Invalid(_('Key strength unacceptable in Debian Keyring. The minimum required key strength is %s bits.' % str(requiredkeystrength)), value, c)
return formencode.validators.FieldStorageUploadConverter._to_python(self, value, c)
def key_id(self):
return self.gpg_id
class CurrentPassword(formencode.validators.String):
"""
Validator for a current password depending on the session's user_id.
"""
def _to_python(self, value, c):
"""
Validate the password.
"""
user = meta.session.query(User).get(session['user_id'])
if user.password != debexpo.lib.utils.hash_it(value):
log.error('Incorrect current password')
raise formencode.Invalid(_('Incorrect password'), value, c)
return formencode.validators.String._to_python(self, value, c)
class CheckBox(formencode.validators.Int):
"""
    Validator for a checkbox. When it is not checked, the browser does not send
    the field at all, and formencode complains about the missing value.
"""
if_missing = None
class NewEmailToSystem(formencode.validators.Email):
"""
Email validator class to make sure there is not another user with
the same email address already registered.
"""
def _to_python(self, value, c=None):
"""
Validate the email address.
``value``
Address to validate.
``c``
"""
u = meta.session.query(User).filter_by(email=value)
# c.user_id can contain a user_id that should be ignored (i.e. when the user
# wants to keep the same email).
if hasattr(c, 'user_id'):
u = u.filter(User.id != c.user_id)
u = u.first()
if u is not None:
log.error('Email %s already found on system' % value)
raise formencode.Invalid(_('A user with this email address is already registered on the system'), value, c)
return formencode.validators.Email._to_python(self, value, c)
class NewNameToSystem(formencode.FancyValidator):
"""
Name validation class to make sure there is not another user with
the same name already registered.
"""
def _to_python(self, value, c=None):
"""
        Validate the name.
``value``
Name to validate.
``c``
"""
u = meta.session.query(User).filter_by(name=value)
# c.user_id can contain a user_id that should be ignored (i.e. when the user
# wants to keep the same email).
if hasattr(c, 'user_id'):
u = u.filter(User.id != c.user_id)
u = u.first()
if u is not None:
log.error('Name %s already found on system' % value)
raise formencode.Invalid(_('A user with this name is already registered on the system. If it is you, use that account! Otherwise use a different name to register.'), value, c)
return value
def ValidateSponsorEmail(values, state, validator):
if values['sponsor'] == '1' and not values['email'].endswith('@debian.org'):
return {'sponsor': 'A sponsor account must be registered with your @debian.org address' }
class DummyValidator(formencode.FancyValidator):
pass
def ValidatePackagingGuidelines(values, state, validator):
try:
if values['packaging_guidelines'] == constants.SPONSOR_GUIDELINES_TYPE_TEXT:
formencode.validators.String(min=1).to_python(values['packaging_guideline_text'])
elif values['packaging_guidelines'] == constants.SPONSOR_GUIDELINES_TYPE_URL:
formencode.validators.URL(add_http=True).to_python(values['packaging_guideline_text'])
else:
formencode.validators.Empty().to_python(values['packaging_guideline_text'])
except Exception as e:
return {'packaging_guideline_text': e}
return None
class DmupSignature(formencode.validators.FieldStorageUploadConverter):
"""
Validator for an uploaded GPG-signed file.
Checks the signature against the user's GPG key in the database.
"""
not_empty = True
def __init__(self):
self.gnupg = get_gnupg()
def _to_python(self, value, c):
v = self.gnupg.verify_file(data=value.value)
if v is None:
log.debug('Uploaded file is not GPG-signed')
raise formencode.Invalid(_('Not a gpg-signed file'), value, c)
log.debug('Uploaded file is GPG-signed')
user = meta.session.query(User).get(session['user_id'])
if not v.is_valid:
log.debug('The GPG signature cannot be verified')
raise formencode.Invalid(_('GPG signature not verified'), value, c)
if v.key_id is None:
log.debug("Cannot read GPG key information")
raise formencode.Invalid_("Not signed with the user's key", value, c)
# checking that the user has uploaded a gpg key
if user.gpg_id is None:
raise formencode.Invalid(_('You have not uploaded any GPG key'), value, c)
# checking that the file has been signed with the right key
if v.key_id[-8:] != user.gpg_id:
log.debug("File has not been signed by the user's key")
raise formencode.Invalid(_("Not signed with the user's key"), value, c)
log.debug("The file has been signed with the user's key")
# checking that the file's content is actually the expected
# agreement form
with open('debexpo/public/dmup_agreement_form.txt', 'r') as f:
user = meta.session.query(User).get(session['user_id'])
expected_data = f.read().format(name=user.name)
if v.data != expected_data:
log.debug("The file is not the expected agreement form.")
raise formencode.Invalid(_("Not the expected agreement form."), value, c)
return formencode.validators.FieldStorageUploadConverter._to_python(self, value, c)
|
{
"content_hash": "634c786322090222d6f37b3ef380f005",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 187,
"avg_line_length": 33.71370967741935,
"alnum_prop": 0.6209783518717856,
"repo_name": "jonnylamb/debexpo",
"id": "f3477cad099fe0ffac3d33fc24f112c10fec5c21",
"size": "9679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "debexpo/lib/validators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3986"
},
{
"name": "Python",
"bytes": "447913"
},
{
"name": "Shell",
"bytes": "3905"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
from trakt.core.helpers import from_iso8601_datetime
from trakt.objects.core.helpers import update_attributes
from trakt.objects.media import Media
class Video(Media):
def __init__(self, client, keys=None, index=None):
super(Video, self).__init__(client, keys, index)
self.action = None
"""
:type: :class:`~python:str`
Item action (e.g. history action: "checkin", "scrobble" or "watch")
"""
self.id = None
"""
:type: :class:`~python:long`
Item id (e.g. history id)
"""
self.collected_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this item was added to your collection (or `None`)
"""
self.paused_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this item was paused (or `None`)
"""
self.watched_at = None
"""
:type: :class:`~python:datetime.datetime`
Timestamp of when this item was watched (or `None`)
"""
self.progress = None
"""
:type: :class:`~python:float`
Playback progress for item (or `None`)
"""
# Flags
self.is_watched = None
"""
:type: :class:`~python:bool`
Flag indicating this item has been watched (or `None`)
"""
self.is_collected = None
"""
:type: :class:`~python:bool`
Flag indicating this item has been collected (or `None`)
"""
def _update(self, info=None, is_watched=None, is_collected=None, **kwargs):
if not info:
return
super(Video, self)._update(info, **kwargs)
update_attributes(self, info, [
'progress'
])
if 'action' in info:
self.action = info.get('action')
if 'id' in info:
self.id = info.get('id')
# Set timestamps
if 'collected_at' in info:
self.collected_at = from_iso8601_datetime(info.get('collected_at'))
if 'paused_at' in info:
self.paused_at = from_iso8601_datetime(info.get('paused_at'))
if 'watched_at' in info:
self.watched_at = from_iso8601_datetime(info.get('watched_at'))
# Set flags
if is_watched is not None:
self.is_watched = is_watched
if is_collected is not None:
self.is_collected = is_collected
|
{
"content_hash": "50a8f0791668262f9c210add60cc2cf7",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 79,
"avg_line_length": 25.49,
"alnum_prop": 0.5460965084346803,
"repo_name": "fuzeman/trakt.py",
"id": "539ceae1dfd4987a929a729adc9f48021ebf1a0c",
"size": "2549",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trakt/objects/video.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "295322"
}
],
"symlink_target": ""
}
|
import copy
import sys
class Variant:
"""A list of string and values used to tweak build execution."""
def __init__(self, values=None):
self.values = set()
self.add(values)
def __contains__(self, item):
return item in self.values
def __str__(self):
return "Variant:" + str(self.values)
def add(self, item):
if item:
if isinstance(item, basestring):
self.values.add(item)
elif isinstance(item, list):
for x in item:
self.add(x)
elif isinstance(item, Variant):
self.values.update(item.values)
else:
print "Error: unknown type for " + str(item)
sys.exit(1)
def remove(self, item):
return self.values.remove(item)
def discard(self, item):
return self.values.discard(item)
def copy(self):
return copy.deepcopy(self)
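# A quick, guarded demonstration (values are illustrative only):
if __name__ == '__main__':
    v = Variant(['debug', 'x86'])
    v.add('profiling')
    assert 'debug' in v
    w = v.copy()
    w.discard('arm')  # discard never raises, even when the item is absent
    print w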
|
{
"content_hash": "91121038b63cf07e4e089b77b9ac09c3",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 67,
"avg_line_length": 29.833333333333332,
"alnum_prop": 0.5810055865921788,
"repo_name": "LudoSapiens/Dev",
"id": "b3438e7614b866b76be79d116be3e721066ee305",
"size": "1417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Tools/BS/BuildSystem/Variants/Variant.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2481545"
},
{
"name": "C++",
"bytes": "6397842"
},
{
"name": "Java",
"bytes": "7734"
},
{
"name": "Lua",
"bytes": "36688"
},
{
"name": "Objective-C",
"bytes": "149497"
},
{
"name": "Python",
"bytes": "137094"
},
{
"name": "Shell",
"bytes": "2587"
}
],
"symlink_target": ""
}
|
import numpy as np
from matplotlib import pyplot as plt
from caid.io import geopdes
from caid.cad_geometry import cad_geometry
geo = cad_geometry()
filename = "/home/macahr/Téléchargements/geopdes_geometries/geo_Lshaped_mp.txt"
#filename = "geo_Lshaped_mp_b.txt"
IO = geopdes()
IO.read(filename, geo)
fig = plt.figure()
geo.plotMesh()
plt.xlim(-1.1,1.1)
plt.ylim(-1.1,1.1)
plt.show()
#geo.save(filename.split(".")[0]+".xml")
#nml_io.write('domain_selalib.nml',geo)
|
{
"content_hash": "fc16c1cb0abba12587489a4e5fbad3d7",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 79,
"avg_line_length": 18.96,
"alnum_prop": 0.7151898734177216,
"repo_name": "ratnania/caid",
"id": "17836e1f767aebbb52b18b165263ffa38232a606",
"size": "520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_io_geopdes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "84542"
},
{
"name": "Python",
"bytes": "1155845"
}
],
"symlink_target": ""
}
|
VERSION = (0, 2, 1, 'stable')
def get_release():
return '-'.join([get_version(), VERSION[-1]])
def get_version():
"""
Returns only digit parts of version.
"""
return '.'.join(str(i) for i in VERSION[:3])
__author__ = 'dlancer'
__docformat__ = 'restructuredtext en'
__copyright__ = 'Copyright 2015-2016, dlancer'
__license__ = 'BSD'
__version__ = get_release()
__maintainer__ = 'dlancer'
__email__ = 'dmdpost@gmail.com'
__status__ = 'Development'
|
{
"content_hash": "a2cba4be2f14bbc7a5afc8830285c406",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 49,
"avg_line_length": 21.454545454545453,
"alnum_prop": 0.597457627118644,
"repo_name": "dlancer/django-pages-cms-extensions",
"id": "8cd70afc4e5bbd9641b5a72f5c9f0c29188e11a2",
"size": "472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pagesext/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4913"
},
{
"name": "Python",
"bytes": "45531"
}
],
"symlink_target": ""
}
|
from bs4 import BeautifulSoup
import requests
url = raw_input("Enter a website to extract the URL's from: ")
r = requests.get("http://" +url)
data = r.text
soup = BeautifulSoup(data)
for link in soup.find_all('a'):
print(link.get('href'))
|
{
"content_hash": "ca4e2b8baaadcec55e8f8c3714d2d663",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 62,
"avg_line_length": 20.583333333333332,
"alnum_prop": 0.6882591093117408,
"repo_name": "mphz/bkdev",
"id": "27b1726e01fc64ecf6688f5cb2b03f45f637fcab",
"size": "247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycod/bs4crawlExample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1948"
},
{
"name": "C",
"bytes": "256"
},
{
"name": "CSS",
"bytes": "1964"
},
{
"name": "HTML",
"bytes": "1366553"
},
{
"name": "Java",
"bytes": "753147"
},
{
"name": "JavaScript",
"bytes": "245377"
},
{
"name": "Makefile",
"bytes": "2557"
},
{
"name": "Python",
"bytes": "15989"
},
{
"name": "Shell",
"bytes": "5337"
}
],
"symlink_target": ""
}
|
from __future__ import division
from nose.tools import *
import numpy.testing as npt
import dit
def test_pruned_samplespace_scalar():
"""Prune a sample space from a ScalarDistribution."""
pmf = [1/2, 0, 1/2]
d = dit.ScalarDistribution(pmf)
d2 = dit.algorithms.pruned_samplespace(d)
ss2_ = [0, 2]
ss2 = list(d2.sample_space())
assert_equal(ss2, ss2_)
npt.assert_allclose(d2.pmf, [1/2, 1/2])
def test_pruned_samplespace():
"""Prune a sample space from a Distribution."""
outcomes = ['0', '1', '2']
pmf = [1/2, 0, 1/2]
    d = dit.Distribution(outcomes, pmf)
d2 = dit.algorithms.pruned_samplespace(d)
ss2_ = ['0', '2']
ss2 = list(d2.sample_space())
assert_equal(ss2, ss2_)
npt.assert_allclose(d2.pmf, [1/2, 1/2])
def test_pruned_samplespace2():
"""Prune a sample space while specifying a desired sample space."""
outcomes = ['0', '1', '2', '3']
pmf = [1/2, 0, 1/2, 0]
ss2_ = ['0', '1', '2']
d = dit.ScalarDistribution(outcomes, pmf)
d2 = dit.algorithms.pruned_samplespace(d, sample_space=ss2_)
# We must make it dense, since the zero element will not appear in pmf.
d2.make_dense()
ss2 = list(d2.sample_space())
assert_equal(ss2, ss2_)
npt.assert_allclose(d2.pmf, [1/2, 0, 1/2])
def test_expanded_samplespace():
"""Expand a sample space from a Distribution."""
outcomes = ['01', '10']
pmf = [1/2, 1/2]
d = dit.Distribution(outcomes, pmf, sample_space=outcomes)
assert_equal(list(d.sample_space()), ['01', '10'])
d2 = dit.algorithms.expanded_samplespace(d)
ss = ['00', '01', '10', '11']
assert_equal(list(d2.sample_space()), ss)
def test_expanded_samplespace2():
"""Expand a sample space from a ScalarDistribution."""
pmf = [1/2, 1/2]
ss = [0, 1]
d = dit.ScalarDistribution(pmf)
assert_equal(list(d.sample_space()), ss)
ss2 = [0, 1, 2]
d2 = dit.algorithms.expanded_samplespace(d, ss2)
assert_equal(list(d2.sample_space()), ss2)
def test_expanded_samplespace3():
"""Expand a sample space without unioning the alphabets."""
outcomes = ['01a', '10a']
pmf = [1/2, 1/2]
d = dit.Distribution(outcomes, pmf, sample_space=outcomes)
d2 = dit.algorithms.expanded_samplespace(d, union=False)
ss_ = ['00a', '01a', '10a', '11a']
assert_equal(list(d2.sample_space()), ss_)
def test_expanded_samplespace_bad():
"""Expand a sample space with wrong number of alphabets."""
outcomes = ['01', '10']
pmf = [1/2, 1/2]
d = dit.Distribution(outcomes, pmf)
alphabets = ['01']
assert_equal(d.outcome_length(), 2)
# This fails because we need to specify two alphabets, not one.
assert_raises(Exception, dit.algorithms.expanded_samplespace, d, alphabets)
def test_expanded_samplespace_bad2():
"""Expand a sample space with wrong number of alphabets."""
outcomes = '01'
pmf = [1/2, 1/2]
d = dit.Distribution(outcomes, pmf)
alphabets = '0'
assert_equal(d.outcome_length(), 1)
# This fails because the sample space is too small, doesn't contain '1'.
e = dit.exceptions.InvalidOutcome
assert_raises(e, dit.algorithms.expanded_samplespace, d, alphabets)
|
{
"content_hash": "8dfeffdc312a614c14c316c81e616e99",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 35.96629213483146,
"alnum_prop": 0.633864417369572,
"repo_name": "chebee7i/dit",
"id": "7d4a5f45419ca8c83acc7e82a6e648175a592a3a",
"size": "3201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dit/algorithms/tests/test_prune_expand.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5938"
},
{
"name": "HTML",
"bytes": "265"
},
{
"name": "PHP",
"bytes": "614"
},
{
"name": "Python",
"bytes": "714621"
}
],
"symlink_target": ""
}
|
"""
Use nose
`$ pip install nose`
`$ nosetests`
"""
import yaml
from urllib import quote
from hyde.fs import File, Folder
from hyde.model import Config, Expando
from hyde.site import Node, RootNode, Site
from nose.tools import raises, with_setup, nottest
TEST_SITE_ROOT = File(__file__).parent.child_folder('sites/test_jinja')
def test_node_site():
s = Site(TEST_SITE_ROOT)
r = RootNode(TEST_SITE_ROOT.child_folder('content'), s)
assert r.site == s
n = Node(r.source_folder.child_folder('blog'), r)
assert n.site == s
def test_node_root():
s = Site(TEST_SITE_ROOT)
r = RootNode(TEST_SITE_ROOT.child_folder('content'), s)
assert r.root == r
n = Node(r.source_folder.child_folder('blog'), r)
assert n.root == r
def test_node_parent():
s = Site(TEST_SITE_ROOT)
r = RootNode(TEST_SITE_ROOT.child_folder('content'), s)
c = r.add_node(TEST_SITE_ROOT.child_folder('content/blog/2010/december'))
assert c.parent == r.node_from_relative_path('blog/2010')
def test_node_module():
s = Site(TEST_SITE_ROOT)
r = RootNode(TEST_SITE_ROOT.child_folder('content'), s)
assert not r.module
n = r.add_node(TEST_SITE_ROOT.child_folder('content/blog'))
assert n.module == n
c = r.add_node(TEST_SITE_ROOT.child_folder('content/blog/2010/december'))
assert c.module == n
def test_node_url():
s = Site(TEST_SITE_ROOT)
r = RootNode(TEST_SITE_ROOT.child_folder('content'), s)
assert not r.module
n = r.add_node(TEST_SITE_ROOT.child_folder('content/blog'))
assert n.url == '/' + n.relative_path
assert n.url == '/blog'
c = r.add_node(TEST_SITE_ROOT.child_folder('content/blog/2010/december'))
assert c.url == '/' + c.relative_path
assert c.url == '/blog/2010/december'
def test_node_full_url():
s = Site(TEST_SITE_ROOT)
s.config.base_url = 'http://localhost'
r = RootNode(TEST_SITE_ROOT.child_folder('content'), s)
assert not r.module
n = r.add_node(TEST_SITE_ROOT.child_folder('content/blog'))
assert n.full_url == quote('http://localhost/blog')
c = r.add_node(TEST_SITE_ROOT.child_folder('content/blog/2010/december'))
assert c.full_url == quote('http://localhost/blog/2010/december')
def test_node_relative_path():
s = Site(TEST_SITE_ROOT)
r = RootNode(TEST_SITE_ROOT.child_folder('content'), s)
assert not r.module
n = r.add_node(TEST_SITE_ROOT.child_folder('content/blog'))
assert n.relative_path == 'blog'
c = r.add_node(TEST_SITE_ROOT.child_folder('content/blog/2010/december'))
assert c.relative_path == 'blog/2010/december'
def test_load():
s = Site(TEST_SITE_ROOT)
s.load()
path = 'blog/2010/december'
node = s.content.node_from_relative_path(path)
assert node
assert Folder(node.relative_path) == Folder(path)
path += '/merry-christmas.html'
resource = s.content.resource_from_relative_path(path)
assert resource
assert resource.relative_path == path
assert not s.content.resource_from_relative_path('/happy-festivus.html')
def test_walk_resources():
s = Site(TEST_SITE_ROOT)
s.load()
pages = [page.name for page in s.content.walk_resources()]
expected = ["404.html",
"about.html",
"apple-touch-icon.png",
"merry-christmas.html",
"crossdomain.xml",
"favicon.ico",
"robots.txt",
"site.css"
]
pages.sort()
expected.sort()
assert pages == expected
def test_contains_resource():
s = Site(TEST_SITE_ROOT)
s.load()
path = 'blog/2010/december'
node = s.content.node_from_relative_path(path)
assert node.contains_resource('merry-christmas.html')
def test_get_resource():
s = Site(TEST_SITE_ROOT)
s.load()
path = 'blog/2010/december'
node = s.content.node_from_relative_path(path)
resource = node.get_resource('merry-christmas.html')
assert resource == s.content.resource_from_relative_path(Folder(path).child('merry-christmas.html'))
def test_resource_slug():
s = Site(TEST_SITE_ROOT)
s.load()
path = 'blog/2010/december'
node = s.content.node_from_relative_path(path)
resource = node.get_resource('merry-christmas.html')
assert resource.slug == 'merry-christmas'
def test_get_resource_from_relative_deploy_path():
s = Site(TEST_SITE_ROOT)
s.load()
path = 'blog/2010/december'
node = s.content.node_from_relative_path(path)
resource = node.get_resource('merry-christmas.html')
assert resource == s.content.resource_from_relative_deploy_path(Folder(path).child('merry-christmas.html'))
resource.relative_deploy_path = Folder(path).child('merry-christmas.php')
assert resource == s.content.resource_from_relative_deploy_path(Folder(path).child('merry-christmas.php'))
def test_is_processable_default_true():
s = Site(TEST_SITE_ROOT)
s.load()
for page in s.content.walk_resources():
assert page.is_processable
def test_relative_deploy_path():
s = Site(TEST_SITE_ROOT)
s.load()
for page in s.content.walk_resources():
assert page.relative_deploy_path == Folder(page.relative_path)
assert page.url == '/' + page.relative_deploy_path
def test_relative_deploy_path_override():
s = Site(TEST_SITE_ROOT)
s.load()
res = s.content.resource_from_relative_path('blog/2010/december/merry-christmas.html')
res.relative_deploy_path = 'blog/2010/december/happy-holidays.html'
for page in s.content.walk_resources():
if res.source_file == page.source_file:
assert page.relative_deploy_path == 'blog/2010/december/happy-holidays.html'
else:
assert page.relative_deploy_path == Folder(page.relative_path)
class TestSiteWithConfig(object):
@classmethod
def setup_class(cls):
cls.SITE_PATH = File(__file__).parent.child_folder('sites/test_jinja_with_config')
cls.SITE_PATH.make()
TEST_SITE_ROOT.copy_contents_to(cls.SITE_PATH)
cls.config_file = File(cls.SITE_PATH.child('alternate.yaml'))
with open(cls.config_file.path) as config:
cls.config = Config(sitepath=cls.SITE_PATH, config_dict=yaml.load(config))
cls.SITE_PATH.child_folder('content').rename_to(cls.config.content_root)
@classmethod
def teardown_class(cls):
cls.SITE_PATH.delete()
def test_load_with_config(self):
s = Site(self.SITE_PATH, config=self.config)
s.load()
path = 'blog/2010/december'
node = s.content.node_from_relative_path(path)
assert node
assert Folder(node.relative_path) == Folder(path)
path += '/merry-christmas.html'
resource = s.content.resource_from_relative_path(path)
assert resource
assert resource.relative_path == path
assert not s.content.resource_from_relative_path('/happy-festivus.html')
def test_content_url(self):
s = Site(self.SITE_PATH, config=self.config)
s.load()
path = 'blog/2010/december'
assert s.content_url(path) == "/" + path
def test_content_url_encoding(self):
s = Site(self.SITE_PATH, config=self.config)
s.load()
path = '".jpg'
assert s.content_url(path) == quote("/" + path)
def test_content_url_encoding_safe(self):
s = Site(self.SITE_PATH, config=self.config)
s.load()
path = '".jpg/abc'
assert s.content_url(path, "") == quote("/" + path, "")
def test_media_url(self):
s = Site(self.SITE_PATH, config=self.config)
s.load()
path = 'css/site.css'
assert s.media_url(path) == "/media/" + path
def test_is_media(self):
s = Site(self.SITE_PATH, config=self.config)
s.load()
assert s.is_media('media/css/site.css')
s.config.media_root = 'monkey'
assert not s.is_media('media/css/site.css')
assert s.is_media('monkey/css/site.css')
def test_full_url_for_content(self):
s = Site(self.SITE_PATH, config=self.config)
s.load()
path = 'blog/2010/december'
assert s.full_url(path) == "/" + path
def test_full_url_for_media(self):
s = Site(self.SITE_PATH, config=self.config)
s.load()
path = 'media/css/site.css'
assert s.is_media(path)
full_url = s.full_url(path)
assert full_url == "/" + path
def test_media_url_from_resource(self):
s = Site(self.SITE_PATH, config=self.config)
s.load()
path = 'css/site.css'
resource = s.content.resource_from_relative_path(
Folder("media").child(path))
assert resource
assert resource.full_url == "/media/" + path
def test_config_ignore(self):
c = Config(self.SITE_PATH, config_dict=self.config.to_dict())
s = Site(self.SITE_PATH, config=c)
s.load()
path = 'apple-touch-icon.png'
resource = s.content.resource_from_relative_path(path)
assert resource
assert resource.full_url == "/" + path
s = Site(self.SITE_PATH, config=c)
s.config.ignore.append('*.png')
resource = s.content.resource_from_relative_path(path)
assert not resource
def test_config_ignore_nodes(self):
c = Config(self.SITE_PATH, config_dict=self.config.to_dict())
git = self.SITE_PATH.child_folder('.git')
git.make()
s = Site(self.SITE_PATH, config=c)
s.load()
git_node = s.content.node_from_relative_path('.git')
assert not git_node
blog_node = s.content.node_from_relative_path('blog')
assert blog_node
assert blog_node.full_url == "/blog"
s = Site(self.SITE_PATH, config=c)
s.config.ignore.append('blog')
blog_node = s.content.node_from_relative_path('blog')
assert not blog_node
git_node = s.content.node_from_relative_path('.git')
assert not git_node
|
{
"content_hash": "ca81cb7cf40350911ae674449a2aec86",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 111,
"avg_line_length": 35.95323741007194,
"alnum_prop": 0.6284142071035518,
"repo_name": "Valloric/hyde",
"id": "ed0b35e2d5ae7519c7bcb3bf4c7a1d63865ab3e7",
"size": "10019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyde/tests/test_site.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "73"
},
{
"name": "Python",
"bytes": "389945"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.models import RealmFilter, all_realm_filters, get_realm_by_string_id
from zerver.lib.actions import do_add_realm_filter, do_remove_realm_filter
import sys
class Command(BaseCommand):
help = """Create a link filter rule for the specified domain.
NOTE: Regexes must be simple enough that they can be easily translated to JavaScript
RegExp syntax. In addition to JS-compatible syntax, the following features are available:
* Named groups will be converted to numbered groups automatically
* Inline-regex flags will be stripped, and where possible translated to RegExp-wide flags
Example: python manage.py realm_filters --realm=zulip --op=add '#(?P<id>[0-9]{2,8})' 'https://trac.humbughq.com/ticket/%(id)s'
Example: python manage.py realm_filters --realm=zulip --op=remove '#(?P<id>[0-9]{2,8})'
Example: python manage.py realm_filters --realm=zulip --op=show
"""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('-r', '--realm',
dest='string_id',
type=str,
required=True,
help='The subdomain or string_id of the realm to adjust filters for.')
parser.add_argument('--op',
dest='op',
type=str,
default="show",
help='What operation to do (add, show, remove).')
parser.add_argument('pattern', metavar='<pattern>', type=str, nargs='?', default=None,
help="regular expression to match")
parser.add_argument('url_format_string', metavar='<url pattern>', type=str, nargs='?',
help="format string to substitute")
def handle(self, *args, **options):
# type: (*Any, **str) -> None
realm = get_realm_by_string_id(options["string_id"])
if options["op"] == "show":
print("%s: %s" % (realm.domain, all_realm_filters().get(realm.domain, [])))
sys.exit(0)
pattern = options['pattern']
if not pattern:
self.print_help("python manage.py", "realm_filters")
sys.exit(1)
if options["op"] == "add":
url_format_string = options['url_format_string']
if not url_format_string:
self.print_help("python manage.py", "realm_filters")
sys.exit(1)
do_add_realm_filter(realm, pattern, url_format_string)
sys.exit(0)
elif options["op"] == "remove":
do_remove_realm_filter(realm, pattern)
sys.exit(0)
else:
self.print_help("python manage.py", "realm_filters")
sys.exit(1)
|
{
"content_hash": "81c2181fc71d3ad81fbc1f1bae940b1a",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 126,
"avg_line_length": 43.53623188405797,
"alnum_prop": 0.5875499334221038,
"repo_name": "reyha/zulip",
"id": "33912d17495e635cfb541ab6cf171d58bbef90c2",
"size": "3004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/management/commands/realm_filters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "241009"
},
{
"name": "Groovy",
"bytes": "5516"
},
{
"name": "HTML",
"bytes": "459437"
},
{
"name": "JavaScript",
"bytes": "1460191"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "82167"
},
{
"name": "Python",
"bytes": "2916875"
},
{
"name": "Ruby",
"bytes": "249748"
},
{
"name": "Shell",
"bytes": "35112"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict, Callable
class DefaultOrderedDict(OrderedDict):
# Source: http://stackoverflow.com/a/6190500/562769
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not isinstance(default_factory, Callable)):
raise TypeError('first argument must be callable')
OrderedDict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return OrderedDict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def __repr__(self):
return 'OrderedDefaultDict(%s, %s)' % (self.default_factory,
OrderedDict.__repr__(self))
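# A guarded demonstration so importing the module stays side-effect free
# (keys and values below are illustrative):
if __name__ == '__main__':
    d = DefaultOrderedDict(list)
    d['a'].append(1)  # 'a' is created on first access via the factory
    d['b'].append(2)
    assert list(d.keys()) == ['a', 'b']  # insertion order is preserved
    print(d)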
|
{
"content_hash": "01979aded866152d9f11fe5d82b0ecf6",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 74,
"avg_line_length": 33.22727272727273,
"alnum_prop": 0.5649794801641587,
"repo_name": "oxinabox/Livecon-Printer",
"id": "6cd8a90d6db7365e93abc5736b1c589adb026c0f",
"size": "1462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quick_con_program_book/defaultordereddict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "48526"
},
{
"name": "Python",
"bytes": "10656"
},
{
"name": "TeX",
"bytes": "350458"
}
],
"symlink_target": ""
}
|
from textblob import TextBlob
from operator import itemgetter
class SentimentAnalysis(object):
"""Get sentiment analysis with only positive and negative considered.
Positive means normal logs and negative sentiment refers to possible attacks.
This class uses sentiment analysis feature from the TextBlob library [Loria2016]_.
References
----------
.. [Loria2016] Steven Loria and the contributors, TextBlob: Simple, Pythonic, text processing--Sentiment analysis,
part-of-speech tagging, noun phrase extraction, translation, and more.
https://github.com/sloria/TextBlob/
"""
def __init__(self, graph, clusters):
"""The constructor of class SentimentAnalysis.
Parameters
----------
graph : graph
The analyzed graph.
clusters : dict
Dictionary of cluster id and its vertices member.
"""
self.graph = graph
self.clusters = clusters
self.cluster_message = {}
def get_cluster_message(self):
"""Get most frequent message in a cluster.
"""
for cluster_id, cluster in self.clusters.iteritems():
event_frequency = {}
for node in cluster:
event = self.graph.node[node]['preprocessed_event']
event_frequency[event] = event_frequency.get(event, 0) + 1
sorted_event_frequency = sorted(event_frequency.items(), key=itemgetter(1), reverse=True)[0][0]
self.cluster_message[cluster_id] = sorted_event_frequency
def get_sentiment(self):
"""Get negative or positive sentiment.
        Sentiment polarity ranges from -1 to 1; values close to 1 are more positive and vice versa.
        A polarity >= 0 is treated as positive (normal logs), below 0 as negative (possible attacks).
Returns
-------
sentiment_score : dict
A dictionary containing key: cluster id and value: sentiment score.
"""
sentiment_score = {}
for cluster_id, message in self.cluster_message.iteritems():
possible_sentiment = TextBlob(message)
            # The raw polarity is recorded; its sign separates positive
            # (normal) clusters from negative (possible attack) ones.
            sentiment_score[cluster_id] = possible_sentiment.sentiment.polarity
return sentiment_score
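# A minimal sketch of the intended call sequence; ``graph`` and ``clusters``
# are assumed to come from the surrounding pygraphc clustering pipeline:
#
#   analyzer = SentimentAnalysis(graph, clusters)
#   analyzer.get_cluster_message()
#   scores = analyzer.get_sentiment()  # {cluster_id: polarity in [-1, 1]}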
|
{
"content_hash": "3172765b67a4c75839a7eb3d35105176",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 118,
"avg_line_length": 39.83606557377049,
"alnum_prop": 0.6259259259259259,
"repo_name": "studiawan/pygraphc",
"id": "1df7b5d307ce79b1546e6915c99e6b2ce7379d02",
"size": "2430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pygraphc/anomaly/SentimentAnalysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "469965"
}
],
"symlink_target": ""
}
|
"""
vpls.py
Created by Nikita Shirokov on 2014-06-16.
Copyright (c) 2014-2015 Nikita Shirokov. All rights reserved.
Copyright (c) 2014-2015 Exa Networks. All rights reserved.
"""
from struct import unpack
from struct import pack
from exabgp.protocol.ip import IP
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
from exabgp.bgp.message import OUT
from exabgp.bgp.message.notification import Notify
from exabgp.bgp.message.update.nlri.nlri import NLRI
from exabgp.bgp.message.update.nlri.qualifier.rd import RouteDistinguisher
def _unique ():
value = 0
while True:
yield value
value += 1
unique = _unique()
class VPLS (NLRI):
__slots__ = ['action','nexthop','rd','base','offset','size','ve','unique']
def __init__ (self, rd, ve, base, offset, size):
NLRI.__init__(self,AFI.l2vpn,SAFI.vpls)
self.action = OUT.ANNOUNCE
self.nexthop = None
self.rd = rd
self.base = base
self.offset = offset
self.size = size
self.ve = ve
self.unique = unique.next()
def index (self):
return self.pack()
def pack (self, addpath=None):
return '%s%s%s%s' % (
'\x00\x11', # pack('!H',17)
self.rd.pack(),
pack(
'!HHH',
self.ve,
self.offset,
self.size
),
pack(
'!L',
(self.base << 4) | 0x1
)[1:] # setting the bottom of stack, should we ?
)
    # XXX: FIXME: we need a unique key here.
# XXX: What can we use as unique key ?
def json (self):
content = ','.join([
self.rd.json(),
'"endpoint": "%s"' % self.ve,
'"base": "%s"' % self.offset,
'"offset": "%s"' % self.size,
'"size": "%s"' % self.base,
])
return '"vpls-%s": { %s }' % (self.unique, content)
def extensive (self):
return "vpls%s endpoint %s base %s offset %s size %s %s" % (
self.rd,
self.ve,
self.base,
self.offset,
self.size,
'' if self.nexthop is None else 'next-hop %s' % self.nexthop,
)
def __str__ (self):
return self.extensive()
@classmethod
def unpack (cls, afi, safi, data, addpath, nexthop, action):
# label is 20bits, stored using 3 bytes, 24 bits
length, = unpack('!H',data[0:2])
if len(data) != length+2:
raise Notify(3,10,'l2vpn vpls message length is not consistent with encoded data')
rd = RouteDistinguisher(data[2:10])
ve,offset,size = unpack('!HHH',data[10:16])
base = unpack('!L','\x00'+data[16:19])[0] >> 4
nlri = cls(rd,ve,base,offset,size)
nlri.action = action
nlri.nexthop = IP.unpack(nexthop)
return len(data), nlri
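# Illustrative round trip under made-up values. RouteDistinguisher takes the
# 8-byte packed form (here: type 0, administrator 0, assigned number 1):
#
#   rd = RouteDistinguisher('\x00\x00\x00\x00\x00\x00\x00\x01')
#   nlri = VPLS(rd, ve=2, base=262144, offset=1, size=8)
#   wire = nlri.pack()  # 2-byte length, RD, VE/offset/size, 3-byte label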
|
{
"content_hash": "ae59bd8e627f6d74b21691d816b2870c",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 85,
"avg_line_length": 24.69,
"alnum_prop": 0.6407452409882544,
"repo_name": "fugitifduck/exabgp",
"id": "b677b4e184c9f846a6652c1ee2f181f0a5eec49c",
"size": "2487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/exabgp/bgp/message/update/nlri/vpls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "1516"
},
{
"name": "Python",
"bytes": "1147571"
},
{
"name": "Shell",
"bytes": "14565"
}
],
"symlink_target": ""
}
|
from __future__ import division, print_function
import argparse
import functools
import gzip
import json
import os
import os.path
import sys
from datetime import date
from multiprocessing import Process, Queue
import requests
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
# Refseq structure
# - Release number
# - Divisions
# 1. archea
# 2. bacteria
# 3. fungi
# 4. invertebrate
# 5. mitochondrion
# 6. other
# 7. plant
# 8. plasmid
# 9. plastid
# 10. protozoa
# 11. vertebrate mammalian
# 12. vertebrate other
# 13. viral
# within each division
# DIVNAME.\d+(.\d+)?.(genomic|protein|rna).(fna|gbff|faa|gpff).gz
# where fna and faa are FASTA, gbff and gpff are Genbank
def _add_data_table_entry(data_manager_dict, data_table_entry, data_table_name):
data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
    data_manager_dict['data_tables'][data_table_name] = data_manager_dict['data_tables'].get(data_table_name, [])
data_manager_dict['data_tables'][data_table_name].append(data_table_entry)
return data_manager_dict
def unzip_to(conn, out_dir, output_filename, chunk_size=4096, debug=False, compress=False):
input_filename = conn.get()
if compress:
open_output = gzip.open
else:
open_output = open
with open_output(os.path.join(out_dir, output_filename), 'wb') as output_file:
while input_filename != 'STOP':
if debug:
print('Reading', input_filename, file=sys.stderr)
with gzip.open(input_filename, 'rb') as input_file:
read_chunk = functools.partial(input_file.read, (chunk_size))
for data in iter(read_chunk, b''): # use b'' as a sentinel to stop the loop. note '' != b'' in Python 3
output_file.write(data)
os.unlink(input_filename)
input_filename = conn.get()
def get_refseq_division(division_name, mol_types, output_directory, debug=False, compress=False):
base_url = 'https://ftp.ncbi.nlm.nih.gov/refseq/release/'
valid_divisions = set(['archea', 'bacteria', 'complete', 'fungi', 'invertebrate', 'mitochondrion', 'other',
'plant', 'plasmid', 'plastid', 'protozoa', 'vertebrate_mammalian', 'vertebrate_other', 'viral'])
ending_mappings = {
'genomic': '.genomic.fna.gz',
'protein': '.protein.faa.gz',
        'rna': '.rna.fna.gz'
}
assert division_name in valid_divisions, "Unknown division name ({})".format(division_name)
for mol_type in mol_types:
assert mol_type in ending_mappings, "Unknown molecule type ({})".format(mol_type)
if not os.path.exists(output_directory):
os.mkdir(output_directory)
release_num_file = base_url + 'RELEASE_NUMBER'
r = requests.get(release_num_file)
release_num = r.text.strip()
division_base_url = base_url + division_name
if debug:
print('Retrieving {}'.format(division_base_url), file=sys.stderr)
r = requests.get(division_base_url)
listing_text = r.text
unzip_queues = {}
unzip_processes = []
final_output_filenames = []
for mol_type in mol_types:
q = unzip_queues[mol_type] = Queue()
output_filename = division_name + '.' + release_num + '.' + mol_type + '.fasta'
if compress:
output_filename += '.gz'
final_output_filenames.append(output_filename)
unzip_processes.append(Process(target=unzip_to, args=(q, output_directory, output_filename),
kwargs=dict(debug=debug, compress=compress)))
unzip_processes[-1].start()
# sample line: <a href="vertebrate_other.86.genomic.gbff.gz">vertebrate_other.86.genomic.gbff.gz</a> 2018-07-13 00:59 10M
for line in StringIO(listing_text):
if '.gz' not in line:
continue
parts = line.split('"')
assert len(parts) == 3, "Unexpected line format: {}".format(line.rstrip())
filename = parts[1]
for mol_type in mol_types:
ending = ending_mappings[mol_type]
if filename.endswith(ending):
if debug:
print('Downloading:', filename, ending, mol_type, file=sys.stderr)
output_filename = os.path.join(output_directory, filename)
with open(output_filename, 'wb') as output_file:
r = requests.get(division_base_url + '/' + filename)
for chunk in r.iter_content(chunk_size=4096):
output_file.write(chunk)
conn = unzip_queues[mol_type]
conn.put(output_filename)
for mol_type in mol_types:
conn = unzip_queues[mol_type]
conn.put('STOP')
return [release_num, final_output_filenames]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download RefSeq databases')
parser.add_argument('--debug', default=False, action='store_true', help='Print debugging output to stderr (verbose)')
parser.add_argument('--compress', default=False, action='store_true', help='Compress output files')
parser.add_argument('--output_directory', default='tmp', help='Directory to write output to')
parser.add_argument('--galaxy_datamanager_filename', help='Galaxy JSON format file describing data manager inputs')
parser.add_argument('--division_names', help='RefSeq divisions to download')
parser.add_argument('--mol_types', help='Molecule types (genomic, rna, protein) to fetch')
parser.add_argument('--pin_date', help='Force download date to this version string')
args = parser.parse_args()
division_names = args.division_names.split(',')
mol_types = args.mol_types.split(',')
if args.galaxy_datamanager_filename is not None:
dm_opts = json.loads(open(args.galaxy_datamanager_filename).read())
output_directory = dm_opts['output_data'][0]['extra_files_path'] # take the extra_files_path of the first output parameter
data_manager_dict = {}
else:
output_directory = args.output_directory
for division_name in division_names:
if args.pin_date is not None:
today_str = args.pin_date
else:
today_str = date.today().strftime('%Y-%m-%d') # ISO 8601 date format
[release_num, fasta_files] = get_refseq_division(division_name, mol_types, output_directory, args.debug, args.compress)
if args.galaxy_datamanager_filename is not None:
for i, mol_type in enumerate(mol_types):
assert mol_type in fasta_files[i], "Filename does not contain expected mol_type ({}, {})".format(mol_type, fasta_files[i])
unique_key = 'refseq_' + division_name + '.' + release_num + '.' + mol_type # note: this is now same as dbkey
dbkey = unique_key
desc = 'RefSeq ' + division_name + ' Release ' + release_num + ' ' + mol_type + ' (' + today_str + ')'
path = os.path.join(output_directory, fasta_files[i])
_add_data_table_entry(data_manager_dict=data_manager_dict,
data_table_entry=dict(value=unique_key, dbkey=dbkey, name=desc, path=path),
data_table_name='all_fasta')
open(args.galaxy_datamanager_filename, 'w').write(json.dumps(data_manager_dict, sort_keys=True))
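# An illustrative command line (division and molecule-type names must come
# from the valid sets checked above; the output directory is arbitrary):
#
#   python fetch_refseq.py --division_names viral --mol_types genomic,protein \
#       --output_directory refseq_out --compress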
|
{
"content_hash": "e8a13177e05bc1112fda43d23fa1a773",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 138,
"avg_line_length": 45.52147239263804,
"alnum_prop": 0.6254716981132076,
"repo_name": "dpryan79/tools-iuc",
"id": "81be5253bda9d662db455737da89ea9589e5482a",
"size": "7443",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "data_managers/data_manager_fetch_refseq/data_manager/fetch_refseq.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "4220"
},
{
"name": "HTML",
"bytes": "11172600"
},
{
"name": "Mako",
"bytes": "2116"
},
{
"name": "Max",
"bytes": "140358"
},
{
"name": "OpenEdge ABL",
"bytes": "1960016"
},
{
"name": "Pep8",
"bytes": "87474"
},
{
"name": "Perl",
"bytes": "59018"
},
{
"name": "Python",
"bytes": "640901"
},
{
"name": "R",
"bytes": "244557"
},
{
"name": "Rebol",
"bytes": "1225"
},
{
"name": "Roff",
"bytes": "3011"
},
{
"name": "Shell",
"bytes": "79414"
},
{
"name": "UnrealScript",
"bytes": "660637"
},
{
"name": "eC",
"bytes": "24"
}
],
"symlink_target": ""
}
|
from typing import Any, Union, cast
from ...error import GraphQLError
from ...language import (
SKIP,
DirectiveDefinitionNode,
DocumentNode,
ExecutableDefinitionNode,
SchemaDefinitionNode,
SchemaExtensionNode,
TypeDefinitionNode,
VisitorAction,
)
from . import ASTValidationRule
__all__ = ["ExecutableDefinitionsRule"]
class ExecutableDefinitionsRule(ASTValidationRule):
"""Executable definitions
A GraphQL document is only valid for execution if all definitions are either
operation or fragment definitions.
See https://spec.graphql.org/draft/#sec-Executable-Definitions
"""
def enter_document(self, node: DocumentNode, *_args: Any) -> VisitorAction:
for definition in node.definitions:
if not isinstance(definition, ExecutableDefinitionNode):
def_name = (
"schema"
if isinstance(
definition, (SchemaDefinitionNode, SchemaExtensionNode)
)
else "'{}'".format(
cast(
Union[DirectiveDefinitionNode, TypeDefinitionNode],
definition,
).name.value
)
)
self.report_error(
GraphQLError(
f"The {def_name} definition is not executable.",
definition,
)
)
return SKIP
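# A minimal, guarded sketch of exercising this rule on its own through the
# generic ``validate`` entry point (schema and document are illustrative):
if __name__ == "__main__":
    from graphql import build_schema, parse, validate
    schema = build_schema("type Query { hello: String }")
    document = parse("type Foo { id: ID }")  # a type definition, not executable
    print(validate(schema, document, rules=[ExecutableDefinitionsRule]))
    # -> [GraphQLError("The 'Foo' definition is not executable.", ...)]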
|
{
"content_hash": "02df932788376d3c2113906059022db0",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 80,
"avg_line_length": 30.58,
"alnum_prop": 0.552648790058862,
"repo_name": "graphql-python/graphql-core",
"id": "01c38fd68900a538c1d05cb0be98d6b05dab2a98",
"size": "1529",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/graphql/validation/rules/executable_definitions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2235538"
}
],
"symlink_target": ""
}
|
import locale
from invoke import ctask as task, Collection
from functools import partial
from invoke.runners import Local, Runner
from paramiko.client import SSHClient, AutoAddPolicy
class RemoteRunner(Runner):
def __init__(self, *args, **kwargs):
super(RemoteRunner, self).__init__(*args, **kwargs)
def start(self, command):
self.ssh_client = SSHClient()
self.ssh_client.load_system_host_keys()
self.ssh_client.set_missing_host_key_policy(AutoAddPolicy())
self.ssh_client.connect(self.context.remote_runner.hostname, username=self.context.remote_runner.username)
self.ssh_channel = self.ssh_client.get_transport().open_session()
if self.using_pty:
self.ssh_channel.get_pty()
self.ssh_channel.exec_command(command)
def stdout_reader(self):
return self.ssh_channel.recv
def stderr_reader(self):
return self.ssh_channel.recv_stderr
def default_encoding(self):
return locale.getpreferredencoding(True)
def wait(self):
return self.ssh_channel.recv_exit_status()
def returncode(self):
return self.ssh_channel.recv_exit_status()
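# Rough usage sketch (host and user are placeholders). The context handed to
# the runner must expose ``remote_runner.hostname`` and
# ``remote_runner.username``, as read in ``start`` above:
#
#   ctx.remote_runner.hostname = 'build.example.com'
#   ctx.remote_runner.username = 'deploy'
#   RemoteRunner(ctx).run('uname -a')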
|
{
"content_hash": "7b37cdc3890fe7a644e217db35405239",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 114,
"avg_line_length": 33.22222222222222,
"alnum_prop": 0.6839464882943144,
"repo_name": "ksurct/MercuryRoboticsEmbedded2016",
"id": "704c9d07e1dc0b45a513fb15305007eab653b5f9",
"size": "1196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/runners.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1299"
},
{
"name": "Makefile",
"bytes": "459"
},
{
"name": "Nix",
"bytes": "4496"
},
{
"name": "Protocol Buffer",
"bytes": "1740"
},
{
"name": "Python",
"bytes": "63736"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
import numpy as np
import time
import multiprocessing as mp
import threading
import Queue
class CustomRunner(object):
"""
    This class manages the background threads needed to fill
a queue full of data.
# Need to call the following code block after initializing everything
self.sess.run(tf.global_variables_initializer())
if self.use_tf_threading:
self.coord = tf.train.Coordinator()
self.net.train_runner.start_p_threads(self.sess)
tf.train.start_queue_runners(sess=self.sess, coord=self.coord)
"""
def __init__(self, arg_less_fn, override_dtypes=None,
n_threads=1, n_processes=3, max_size=30):
        # arg_less_fn should be a function that returns ready-to-use data
        # in the form of numpy arrays. The shape of its output is used to
        # shape the output tensors, so it must be callable at init time.
        # override_dtypes optionally overrides the dtypes inferred from numpy.
self.data_fn = arg_less_fn
self.n_threads = n_threads
self.n_processes = n_processes
self.max_size = max_size
self.use_pool = False
# data_fn shouldn't take any argument,
# just directly return the necessary data
# set via the setter fn
data = self.data_fn()
self.inps = []
shapes, dtypes = [], []
for i, d in enumerate(data):
inp = tf.placeholder(dtype=d.dtype, shape=[None] + list(d.shape[1:]))
self.inps.append(inp)
# remove batching index for individual element
shapes.append(d.shape[1:])
dtypes.append(d.dtype)
# The actual queue of data.
self.tf_queue = tf.FIFOQueue(shapes=shapes,
# override_dtypes or default
dtypes=override_dtypes or dtypes,
capacity=2000)
# The symbolic operation to add data to the queue
self.enqueue_op = self.tf_queue.enqueue_many(self.inps)
def get_inputs(self, batch_size):
"""
Return's tensors containing a batch of images and labels
if tf_queue has been closed this will raise a QueueBase exception
killing the main process if a StopIteration is thrown in one of the
data processes.
"""
return self.tf_queue.dequeue_up_to(tf.reduce_min([batch_size, self.tf_queue.size()]))
def thread_main(self, sess, stop_event):
"""
Function run on alternate thread. Basically, keep adding data to the queue.
"""
tt_last_update = time.time() - 501
count = 0
tot_p_end = 0
processes_all_done = False
while not stop_event.isSet():
if tt_last_update + 500 < time.time():
t = time.time()
# 500 seconds since last update
#print("DataQueue Threading Update:")
#print("TIME: " + str(t))
# MP.Queue says it is not thread safe and is not perfectly accurate.
# Just want to make sure there's no leakage and max_size
# is safely hit
#print("APPROX SIZE: %d" % self.queue.qsize())
#print("TOTAL FETCH ITERATIONS: %d" % count)
tt_last_update = t
count += 1
if processes_all_done and self.queue.empty():
break
try:
                # block for at most 5 seconds so stop_event is re-checked
                data = self.queue.get(timeout=5)
except Queue.Empty:
continue
            if isinstance(data, StopIteration):
tot_p_end += 1
if tot_p_end == self.n_processes:
# Kill any processes
# may need a lock here if multithreading
processes_all_done = True
#print("ALL PROCESSES DONE")
continue
fd = {}
for i, d in enumerate(data):
fd[self.inps[i]] = d
sess.run(self.enqueue_op, feed_dict=fd)
self.queue.close()
def process_main(self, queue):
# Scramble seed so it's not a copy of the parent's seed
np.random.seed()
# np.random.seed(1)
try:
while True:
queue.put(self.data_fn())
except StopIteration as e:
# Should only manually throw when want to close queue
queue.put(e)
#raise e
return
except Exception as e:
queue.put(StopIteration())
#raise e
return
def set_data_fn(self, fn):
self.data_fn = fn
def start_p_threads(self, sess):
""" Start background threads to feed queue """
self.processes = []
self.queue = mp.Queue(self.max_size)
for n in range(self.n_processes):
p = mp.Process(target=self.process_main, args=(self.queue,))
p.daemon = True # thread will close when parent quits
p.start()
self.processes.append(p)
self.threads = []
self.thread_event_killer = []
for n in range(self.n_threads):
kill_thread = threading.Event()
self.thread_event_killer.append(kill_thread)
t = threading.Thread(target=self.thread_main, args=(sess, kill_thread))
t.daemon = True # thread will close when parent quits
t.start()
self.threads.append(t)
return self.processes + self.threads
def kill_programs(self):
# Release objects here if need to
# threads should die in at least 5 seconds because
# nothing blocks for more than 5 seconds
# Sig term, kill first so no more data
[p.terminate() for p in self.processes]
[p.join() for p in self.processes]
# kill second after purging
[e.set() for e in self.thread_event_killer]
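# A minimal sketch of a compatible ``arg_less_fn`` (shapes are made up): it
# takes no arguments and returns a tuple of batched numpy arrays, one per
# queue component.
#
#   def data_fn():
#       images = np.random.rand(8, 32, 32, 3).astype(np.float32)
#       labels = np.random.randint(0, 10, size=(8, 1))
#       return images, labels
#
#   runner = CustomRunner(data_fn, n_threads=1, n_processes=2)
#   image_batch, label_batch = runner.get_inputs(batch_size=8)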
|
{
"content_hash": "b0a30ae31e06d902e648d8d4f4610265",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 93,
"avg_line_length": 37.157575757575756,
"alnum_prop": 0.5419996737889414,
"repo_name": "minyoungg/selfconsistency",
"id": "f27211070cb21667d7715d9674b3810076ffeb7f",
"size": "6131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/utils/queue_runner.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "587483"
},
{
"name": "Python",
"bytes": "113145"
},
{
"name": "Shell",
"bytes": "267"
}
],
"symlink_target": ""
}
|
import scrapy, json
from scrapy.exceptions import CloseSpider
from scrapy import Request
from json import loads
from datetime import date
from collections import OrderedDict
class SegurarseComArSpider(scrapy.Spider):
name = "segurarse_com_ar_spider"
#start_urls = ('https://segurarse.com.ar/cotizador-de-seguros-auto',)
company_list = ['allianz','liberty','mapfre','mercantil','meridional','orbis','sancristobal','sancor','rsa','zurich',]
ins_types = {'valA':'Responsabilidad Civil','valB':'Terceros Completos','valC':'Terceros Completos Full','valD':'Terceros Completos Full + Granizo','valE':'Todo Riesgo'}
use_selenium = False
def __init__(self, categories=None, *args, **kwargs):
super(SegurarseComArSpider, self).__init__(*args, **kwargs)
#pass
if not categories:
raise CloseSpider('Received no categories!')
else:
self.categories = categories
self.sub_urls = loads(self.categories).keys()
# Combination sample:
# brand / version / year / age / province / location / zip
# TOYOTA---LITE*ACE---1988---40---LA*PAMPA---LOS*OLIVOS---8203
def start_requests(self):
for param in self.sub_urls:
for company in self.company_list:
                parts = param.split('---')
                compania = company  # e.g. "zurich"
                marca = parts[0].replace('*', ' ')  # e.g. "TOYOTA"
                anioNum = parts[2]  # e.g. "2016"
                # esCeroKm = "False"
                # tieneAlarma = "False"
                provincia = parts[4].replace('*', ' ')  # e.g. "CAPITAL FEDERAL"
                localidad = parts[5].replace('*', ' ')  # e.g. "BELGRANO"
                version = parts[1].replace('*', ' ')  # e.g. "COROLLA 1.8 SE-G L/14"
                codigoPostal = parts[-1]  # e.g. "1428"
                # sumaAseguradaIA = "375.000,00"
                # codigoIA = "450245"
                edad = parts[3]  # e.g. "35"
                gnc = "False"
                cobertura = "4"
                sexo = "Masculino"
url = "https://segurarse.com.ar/Service2/CotizarWEB"
query = "compania={}&marca={}&anioNum={}&provincia={}&localidad={}&version={}&codigoPostal={}&edad={}&cobertura={}&sexo={}".\
format(compania, marca, anioNum, provincia, localidad, version, codigoPostal, edad, cobertura, sexo)
url = url + "?" + query.replace(' ','+')
yield Request(url, callback=self.parse_products, meta={'param':param, 'company':company})
def parse_products(self, response):
param = response.meta['param']
value_list = json.loads(response.body)
for value_key in value_list.keys():
if value_list[value_key] == "-":
continue
item = OrderedDict()
item['Vendedor'] = 428
item['Model'] = param.split('---')[1].replace('*',' ')
item['Brand'] = param.split('---')[0].replace('*',' ')
item['Year'] = param.split('---')[2]
item['Location'] = param.split('---')[4].replace('*',' ') + " " + param.split('---')[5].replace('*',' ')
item['Age'] = param.split('---')[3]
item['Date'] = date.today()
item['Company'] = response.meta['company'].title()
item['Insurance Type'] = self.ins_types[value_key]
item['Price'] = value_list[value_key].replace('$','').replace('.','').replace(',','.')
item['Currency'] = "ARS"
yield item
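# --- Illustrative sketch (not part of the spider) ---
# Shows how the '---'-separated category key documented above is decoded; the
# sample string is taken from the "Combination sample" comment in the class.
if __name__ == '__main__':
    sample = 'TOYOTA---LITE*ACE---1988---40---LA*PAMPA---LOS*OLIVOS---8203'
    parts = sample.split('---')
    print('brand:', parts[0].replace('*', ' '))
    print('version:', parts[1].replace('*', ' '))
    print('year:', parts[2])
    print('age:', parts[3])
    print('province:', parts[4].replace('*', ' '))
    print('location:', parts[5].replace('*', ' '))
    print('zip:', parts[-1])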
|
{
"content_hash": "3dd4b0dd8704760e021cada4a52c082e",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 170,
"avg_line_length": 35.160919540229884,
"alnum_prop": 0.6260215756783263,
"repo_name": "hanjihun/Car",
"id": "d96d33e62598ac7b01581838ca0ade94ed3a30c3",
"size": "3083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "segurarse_com_ar/segurarse_com_ar/spiders/segurarse_com_ar_spider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105671"
}
],
"symlink_target": ""
}
|
import requests
import random
# This is like namedtuple, but mutable. (https://pypi.python.org/pypi/recordtype)
# `pip install recordtype` to get it
from recordtype import recordtype
GHIBLI_URL = "https://ghibliapi.herokuapp.com/"
session = requests.session()
# We don't want to panic when we see charles custom SSL cert.
# I've turned ssl verification off, but you can also pass a file path to charles's cert
# There are several ways to handle this.
session.verify = False
# Here's what passing the cert file would look like.
# You can ask charles to save the file somewhere on your machine
# session.verify = "/Users/Christopher/charles_sessions/charles-ssl-proxying-certificate.pem"
Person = recordtype("Person", ["id",
"name",
"gender",
"age",
"eye_color",
"hair_color",
"films",
"species",
"url"])
Species = recordtype("Species", ["id",
"name",
"classification",
"eye_colors",
"hair_colors",
"url",
"people",
"films", ])
Film = recordtype("Film", ["id",
"title",
"description",
"director",
"producer",
"release_date",
"rt_score",
"people",
"species",
"locations",
"vehicles",
"url"])
Vehicle = recordtype("Vehicle", ["id",
"name",
"description",
"vehicle_class",
"length",
"pilot",
"films",
"url", ])
Location = recordtype("Location", ["id",
"name",
"climate",
"terrain",
"surface_water",
"residents",
"films",
"url", ])
def get_record(recordtype, record_url):
"""
    Given a record url and a recordtype, fetch the record from the API and build it.
    Assumes there's just one record at the location and everything goes well.
"""
resp = session.get(record_url)
new_record = recordtype(**resp.json())
return new_record
def is_specific_url(url):
"""
This api has a tendency to give you the url for the general list of things
if there are no entries.
    This separates "people/" from "people/123456".
"""
return url[-1] != '/'
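# For example (illustrative):
#   is_specific_url("https://ghibliapi.herokuapp.com/people/")      -> False
#   is_specific_url("https://ghibliapi.herokuapp.com/people/12345") -> True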
def get_all_people():
"""
You can hit the resource name without an ID to get a listing of all of that resource
"""
resp = session.get(GHIBLI_URL + "people")
people = [Person(**json_person)
for json_person in resp.json()]
return people
def make_crossover(all_people):
"""
Make a new story about the studio ghibli characters
"""
story = """
{protagonist.name} is a {protagonist.gender} {protagonist.species.name},
and {friend.name} is a {friend.gender} {friend.species.name}, and they are going on an adventure.
They have to challenge many obstacles, and fight the evil {antagonist.name}, a {antagonist.age} year old
{antagonist.species.name}
"""
# import pudb; pudb.set_trace() # breakpoint 7ee757b5 //
story_choices = {
"protagonist": random.choice(all_people),
"friend": random.choice(all_people),
"antagonist": random.choice(all_people),
}
    print(story.format(**story_choices))
from guppy import hpy
heap = hpy()
    print(heap.heap())
def main():
people = get_all_people()
for person in people:
person.species = get_record(Species, person.species)
film_objs = []
# I wish this graph was fully connected, it would be cooler for the story, maybe...
for film_url in person.films:
film = get_record(Film, film_url)
film.locations = [get_record(Location, url)
for url in film.locations if is_specific_url(url)]
film.vehicles = [get_record(Vehicle, url)
for url in film.vehicles if is_specific_url(url)]
film_objs.append(film)
person.films = film_objs
make_crossover(people)
if __name__ == '__main__':
main()
|
{
"content_hash": "54c24ab069a3b618b6fec86ffd0b8a51",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 108,
"avg_line_length": 34.186206896551724,
"alnum_prop": 0.48113778495057496,
"repo_name": "noisebridge/PythonClass",
"id": "f90cc0604730602a4aaae583a2f8ead38f06e506",
"size": "6108",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "guest-talks/20170509-debugging/ghiblister.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3255"
},
{
"name": "HTML",
"bytes": "524536"
},
{
"name": "Jupyter Notebook",
"bytes": "493067"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "Perl",
"bytes": "34109"
},
{
"name": "Python",
"bytes": "474536"
},
{
"name": "Shell",
"bytes": "263"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
__title__ = 'server-profile-template'
__version__ = '0.0.1'
__copyright__ = '(C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP'
__license__ = 'MIT'
__status__ = 'Development'
from hpOneView.resources.resource import ResourceClient
class ServerProfiles(object):
URI = '/rest/server-profiles'
def __init__(self, con):
self._connection = con
self._client = ResourceClient(con, self.URI)
self.__default_values = {
'type': 'ServerProfileV5'
}
def create(self, resource, timeout=-1):
"""
        Creates a server profile using the information provided in the resource parameter. Connection requests can be
        one of three types: explicit, auto, and port auto. An explicit request includes the adapter, port, and
        flexNic; an auto request specifies none of the three; and a port auto request specifies just the adapter
        and port.
Args:
resource: dict object to create
timeout:
Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
in OneView, just stop waiting for its completion.
Returns:
dict: Created server profile.
"""
data = self.__default_values.copy()
data.update(resource)
return self._client.create(resource=data, timeout=timeout)
def update(self, resource, id_or_uri):
"""
        Allows a server profile object to have its configuration modified. These modifications can be as simple as a
        name or description change, or much more complex changes to the assigned server and networking configuration.
        Note that the selection of a virtual or physical MAC or serial number is not mutable once a profile has been
        created; attempts to change those elements will not be applied to the target profile. Connection requests can
        be one of three types: explicit, auto, and port auto. An explicit request is where the portId parameter
        includes the adapter, port, and flexNic; an auto request is where portId is set to "Auto"; and a port auto
        request is where portId includes just the adapter and port.
Args:
id_or_uri: Could be either the server profile id or the server profile uri.
resource (dict): Object to update.
Returns:
dict: The server profile resource.
"""
data = self.__default_values.copy()
data.update(resource)
return self._client.update(resource=data, uri=id_or_uri)
def patch(self, id_or_uri, operation, path, value, timeout=-1):
"""
Performs a specific patch operation for the given server profile.
The supported operation is:
Update the server profile from the server profile template.
Operation: replace
Path: /templateCompliance
Value: Compliant
Args:
id_or_uri:
Could be either the server profile id or the server profile uri
operation:
The type of operation: one of "add", "copy", "move", "remove", "replace", or "test".
path:
The JSON path the operation is to use. The exact meaning depends on the type of operation.
value:
The value to add or replace for "add" and "replace" operations, or the value to compare against
for a "test" operation. Not used by "copy", "move", or "remove".
Returns:
dict: Server profile resource.
"""
return self._client.patch(id_or_uri, operation, path, value, timeout)
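    # Usage sketch (illustrative; `con` is an authenticated hpOneView
    # connection object and the URI below is hypothetical):
    #   profiles = ServerProfiles(con)
    #   profiles.patch('/rest/server-profiles/<id>', 'replace',
    #                  '/templateCompliance', 'Compliant')
    # This performs the documented operation above: updating the profile from
    # its server profile template.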
def delete(self, resource, timeout=-1):
"""
Deletes a server profile object from the appliance based on its server profile UUID.
Args:
resource (dict): Object to delete.
timeout:
Timeout in seconds. Wait task completion by default. The timeout does not abort the operation
in OneView, just stops waiting for its completion.
Returns:
bool: Indicating if the server profile was successfully deleted.
"""
return self._client.delete(resource=resource, timeout=timeout)
def get_all(self, start=0, count=-1, filter='', sort=''):
"""
        Gets a list of server profiles based on optional sorting and filtering, and constrained by start and count
        parameters. Providing a -1 for the count parameter will restrict the result set size to 64 server profiles.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return.
                Providing a -1 for the count parameter will restrict the result set size to 64 server profiles.
                The maximum number of profiles returned is restricted to 256, i.e., if the user requests more
                than 256, the count is internally limited to 256.
The actual number of items in the response may differ from the
requested count if the sum of start and count exceed the total number of items, or if returning the
requested number of items would take too long.
filter:
A general filter/query string to narrow the list of items returned. The
default is no filter - all resources are returned.
Filters are supported for the name, description, serialNumber, uuid, affinity, macType, wwnType,
serialNumberType, serverProfileTemplateUri, templateCompliance, status and state attributes.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time, with the oldest entry first.
Returns:
list: A list of server profiles.
"""
return self._client.get_all(start=start, count=count, filter=filter, sort=sort)
def get(self, id_or_uri):
"""
Retrieves a server profile managed by the appliance by ID or by uri.
Args:
id_or_uri: Could be either the server profile resource id or uri.
Returns:
dict: The server profile resource.
"""
return self._client.get(id_or_uri=id_or_uri)
def get_by(self, field, value):
"""
        Gets all server profiles that match a specified filter.
        The search is case-insensitive.
Args:
field: field name to filter
value: value to filter
Returns:
list: A list of server profiles.
"""
return self._client.get_by(field, value)
def get_by_name(self, name):
"""
Gets a server profile by name.
Args:
name: Name of the server profile.
Returns:
dict: The server profile resource.
"""
return self._client.get_by_name(name)
def get_schema(self):
"""
Generates the ServerProfile schema.
Returns:
dict: The server profile schema.
"""
return self._client.get_schema()
def get_compliance_preview(self, id_or_uri):
"""
Gets the preview of manual and automatic updates required to make the server profile
consistent with its template.
Args:
id_or_uri: Could be either the server profile resource id or uri.
Returns:
dict: Server profile compliance preview.
"""
uri = self._client.build_uri(id_or_uri) + '/compliance-preview'
return self._client.get(uri)
def get_profile_ports(self, **kwargs):
"""
Retrieves the port model associated with a server or server hardware type and enclosure group.
Args:
enclosureGroupUri (str):
The URI of the enclosure group associated with the resource.
serverHardwareTypeUri (str):
The URI of the server hardware type associated with the resource.
serverHardwareUri (str):
The URI of the server hardware associated with the resource.
Returns:
dict: Profile port.
"""
uri = self.__build_uri_with_query_string(kwargs, '/profile-ports')
return self._client.get(uri)
def get_messages(self, id_or_uri):
"""
        Retrieves the error or status messages associated with the specified profile.
Args:
id_or_uri: Could be either the server profile resource id or uri.
Returns:
dict: Server Profile Health
"""
uri = self._client.build_uri(id_or_uri) + '/messages'
return self._client.get(uri)
def get_transformation(self, id_or_uri, **kwargs):
"""
Transforms an existing profile by supplying a new server hardware type and/or enclosure group. A profile
will be returned with a new configuration based on the capabilities of the supplied server hardware type
and/or enclosure group. All deployed connections will have their port assignment set to 'Auto'.
        Re-selection of the server hardware may also be required. The new profile can subsequently be used to update
        the server profile, but it is not guaranteed to pass validation. Any incompatibilities will be flagged when
        the transformed server profile is submitted.
Args:
id_or_uri:
Could be either the server profile resource id or uri.
enclosureGroupUri (str):
The URI of the enclosure group associated with the resource.
serverHardwareTypeUri (str):
The URI of the server hardware type associated with the resource.
serverHardwareUri (str):
The URI of the server hardware associated with the resource.
Returns:
dict: Server Profile
"""
uri = self.__build_uri_with_query_string(kwargs, '/transformation', id_or_uri)
return self._client.get(uri)
def get_available_networks(self, **kwargs):
"""
Retrieves the list of Ethernet networks, Fibre Channel networks and network sets that are available to a
server profile along with their respective ports.
Args:
enclosureGroupUri (str):
The URI of the enclosure group associated with the resource.
functionType (str):
The FunctionType (Ethernet or FibreChannel) to filter the list of networks returned.
serverHardwareTypeUri (str):
The URI of the server hardware type associated with the resource.
serverHardwareUri (str):
The URI of the server hardware associated with the resource.
view (str):
Return a specific subset of the attributes of the resource or collection, by specifying the name of a
predefined view. The default view is expand - show all attributes of the resource and all elements of
collections of resources.
Values:
Ethernet - Specifies that the connection is to an Ethernet network or a network set.
FibreChannel - Specifies that the connection is to a Fibre Channel network.
Returns:
list: Available networks.
"""
uri = self.__build_uri_with_query_string(kwargs, '/available-networks')
return self._client.get(uri)
def get_available_servers(self, **kwargs):
"""
Retrieves the list of available servers.
Args:
enclosureGroupUri (str):
The URI of the enclosure group associated with the resource.
serverHardwareTypeUri (str):
The URI of the server hardware type associated with the resource.
profileUri (str):
The URI of the server profile resource.
Returns:
list: Available servers.
"""
uri = self.__build_uri_with_query_string(kwargs, '/available-servers')
return self._client.get(uri)
def get_available_storage_system(self, **kwargs):
"""
        Retrieves a specific storage system and its associated volumes that are available to the server profile based
on the given server hardware type and enclosure group.
Args:
enclosureGroupUri (str):
The URI of the enclosure group associated with the resource.
serverHardwareTypeUri (str):
The URI of the server hardware type associated with the resource.
storageSystemId (str):
The storage system ID associated with the resource.
Returns:
dict: Available storage system.
"""
uri = self.__build_uri_with_query_string(kwargs, '/available-storage-system')
return self._client.get(uri)
def get_available_storage_systems(self, start=0, count=-1, filter='', sort='', **kwargs):
"""
Retrieves the list of the storage systems and their associated volumes that are available to the server profile
based on the given server hardware type and enclosure group.
Args:
count:
The number of resources to return. A count of -1 requests all the items. The actual number of items in
the response may differ from the requested count if the sum of start and count exceed the total
number of items, or if returning the requested number of items would take too long.
start:
The first item to return, using 0-based indexing. If not specified, the default is 0 - start with the
first available item.
filter:
A general filter/query string to narrow the list of items returned. The default is no filter - all
resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based on create time, with the
oldest entry first.
enclosureGroupUri (str):
The URI of the enclosure group associated with the resource.
serverHardwareTypeUri (str):
The URI of the server hardware type associated with the resource.
Returns:
list: Available storage systems.
"""
uri = self.__build_uri_with_query_string(kwargs, '/available-storage-systems')
return self._client.get_all(start=start, count=count, filter=filter, sort=sort, uri=uri)
def get_available_targets(self, **kwargs):
"""
Retrieves a list of the target servers and empty device bays that are available for assignment to the server
profile.
Args:
enclosureGroupUri (str):
The URI of the enclosure group associated with the resource.
serverHardwareTypeUri (str):
The URI of the server hardware type associated with the resource.
            profileUri (str):
                The URI of the server profile associated with the resource.
        Returns:
            list: Available target servers and empty device bays.
"""
uri = self.__build_uri_with_query_string(kwargs, '/available-targets')
return self._client.get(uri)
    def __build_uri_with_query_string(self, kwargs, suffix_path, id_or_uri=None):
uri = self.URI
if id_or_uri:
uri = self._client.build_uri(id_or_uri)
query_string = '&'.join('{}={}'.format(key, kwargs[key]) for key in sorted(kwargs))
        return uri + suffix_path + '?' + query_string
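# --- Usage sketch (illustrative; requires a live OneView appliance) ---
# `con` is assumed to be an authenticated hpOneView connection object and the
# profile name is hypothetical:
#   profiles = ServerProfiles(con)
#   first_ten = profiles.get_all(count=10)
#   profile = profiles.get_by_name('my-profile')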
|
{
"content_hash": "3759ee42f202fb0061aa488e863a3910",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 120,
"avg_line_length": 42.385416666666664,
"alnum_prop": 0.6225116736298845,
"repo_name": "andreadean5/python-hpOneView",
"id": "abff3974f2ad0cddc6348b59f589418318c7eeaa",
"size": "17436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hpOneView/resources/servers/server_profiles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "920844"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from django.contrib import admin
from . import views
admin.autodiscover()
urlpatterns = [
url(r'^login/$', views.Login.as_view(), name='login'),
url(r'^logout/$', views.Logout.as_view(), name='logout'),
url(r'^$', views.PublicIndex.as_view(), name='public-index'),
url(r'^scorers/$', views.ScorersIndex.as_view(), name='index'),
url(r'^(?P<category>[a-z-]+)/entries/$', views.EntryList.as_view(), name='entry-list'),
url(r'^(?P<category>[a-z-]+)/entries/add/$', views.EntryAdd.as_view(), name='entry-add'),
url(r'^(?P<category>[a-z-]+)/entries/edit/(?P<entry>\d+)/$', views.EntryEdit.as_view(), name='entry-edit'),
url(r'^(?P<category>[a-z-]+)/entries/delete/(?P<entry>\d+)/$', views.EntryDelete.as_view(), name='entry-delete'),
url(r'^(?P<category>[a-z-]+)/first-round/set-groups/$', views.FirstRoundSetGroups.as_view(), name='first-round-set-groups'),
url(r'^(?P<category>[a-z-]+)/first-round/set-groups/add/$', views.FirstRoundGroupAdd.as_view(), name='first-round-group-add'),
url(r'^(?P<category>[a-z-]+)/first-round/set-groups/remove/$', views.FirstRoundGroupRemove.as_view(), name='first-round-group-remove'),
url(r'^(?P<category>[a-z-]+)/first-round/matches/$', views.FirstRoundMatches.as_view(), name='first-round-matches'),
url(r'^(?P<category>[a-z-]+)/first-round/matches/(?P<group>\d+)/(?P<time>\d+)/(?P<match>\d+)/$', views.FirstRoundMatchRecord.as_view(), name='first-round-match-record'),
url(r'^(?P<category>[a-z-]+)/first-round/matches/(?P<group>\d+)/(?P<time>\d+)/verify/$', views.FirstRoundMatchVerify.as_view(), name='first-round-match-verify'),
url(r'^(?P<category>[a-z-]+)/first-round/leaderboard/$', views.FirstRoundLeaderboard.as_view(), name='first-round-leaderboard'),
url(r'^(?P<category>[a-z-]+)/first-round/scoresheets/$', views.FirstRoundScoresheets.as_view(), name='first-round-scoresheets'),
url(r'^(?P<category>[a-z-]+)/first-round/judges/$', views.FirstRoundJudges.as_view(), name='first-round-judges'),
url(r'^first-round/leaderboard/recurve/$', views.FirstRoundLeaderboardExportRecurve.as_view(), name='first-round-leaderboard-export-recurve'),
url(r'^first-round/leaderboard/compound/$', views.FirstRoundLeaderboardExportCompound.as_view(), name='first-round-leaderboard-export-compound'),
url(r'^(?P<category>[a-z-]+)/first-round/$', views.PublicFirstRound.as_view(), name='public-first-round'),
url(r'^(?P<category>[a-z-]+)/second-round/set-groups/$', views.SecondRoundSetGroups.as_view(), name='second-round-set-groups'),
url(r'^(?P<category>[a-z-]+)/second-round/matches/$', views.SecondRoundMatches.as_view(), name='second-round-matches'),
url(r'^(?P<category>[a-z-]+)/second-round/matches/(?P<group>\d+)/(?P<time>\d+)/(?P<match>\d+)/$', views.SecondRoundMatchRecord.as_view(), name='second-round-match-record'),
url(r'^(?P<category>[a-z-]+)/second-round/matches/(?P<group>\d+)/(?P<time>\d+)/verify/$', views.SecondRoundMatchVerify.as_view(), name='second-round-match-verify'),
url(r'^(?P<category>[a-z-]+)/second-round/leaderboard/$', views.SecondRoundLeaderboard.as_view(), name='second-round-leaderboard'),
url(r'^(?P<category>[a-z-]+)/second-round/scoresheets/$', views.SecondRoundScoresheets.as_view(), name='second-round-scoresheets'),
url(r'^(?P<category>[a-z-]+)/second-round/judges/$', views.SecondRoundJudges.as_view(), name='second-round-judges'),
url(r'^second-round/leaderboard/$', views.SecondRoundLeaderboardExport.as_view(), name='second-round-leaderboard-export'),
url(r'^second-round/leaderboard/compound/$', views.SecondRoundLeaderboardExportCompound.as_view(), name='second-round-leaderboard-export-compound'),
url(r'^second-round/leaderboard/recurve/$', views.SecondRoundLeaderboardExportRecurve.as_view(), name='second-round-leaderboard-export-recurve'),
url(r'^(?P<category>[a-z-]+)/second-round/$', views.PublicSecondRound.as_view(), name='public-second-round'),
url(r'^(?P<category>[a-z-]+)/third-round/set-groups/$', views.ThirdRoundSetGroups.as_view(), name='third-round-set-groups'),
url(r'^(?P<category>[a-z-]+)/third-round/matches/$', views.ThirdRoundMatches.as_view(), name='third-round-matches'),
url(r'^(?P<category>[a-z-]+)/third-round/matches/(?P<group>\d+)/(?P<time>\d+)/(?P<match>\d+)/$', views.ThirdRoundMatchRecord.as_view(), name='third-round-match-record'),
url(r'^(?P<category>[a-z-]+)/third-round/leaderboard/$', views.ThirdRoundLeaderboard.as_view(), name='third-round-leaderboard'),
url(r'^(?P<category>[a-z-]+)/third-round/scoresheets/$', views.ThirdRoundScoresheets.as_view(), name='third-round-scoresheets'),
url(r'^(?P<category>[a-z-]+)/third-round/judges/$', views.ThirdRoundJudges.as_view(), name='third-round-judges'),
url(r'^third-round/leaderboard/$', views.ThirdRoundLeaderboardExport.as_view(), name='third-round-leaderboard-export'),
url(r'^(?P<category>[a-z-]+)/third-round/$', views.PublicThirdRound.as_view(), name='public-third-round'),
url(r'^(?P<category>[a-z-]+)/finals/set-seeds/$', views.FinalsSetSeeds.as_view(), name='finals-set-seeds'),
url(r'^finals/$', views.Finals.as_view(), name='finals'),
url(r'^finals/(?P<category>[a-z-]+)/(?P<match>\d+)/$', views.FinalsMatchRecord.as_view(), name='finals-match-record'),
url(r'^finals/(?P<category>[a-z-]+)/(?P<archer>\d+)/(?P<end>\d+)/$', views.FinalsShootdownRecord.as_view(), name='finals-shootdown-record'),
url(r'^finals/scoresheets/$', views.FinalsScoresheets.as_view(), name='finals-scoresheets'),
url(r'^(?P<category>[a-z-]+)/finals/$', views.PublicFinals.as_view(), name='public-finals'),
url(r'^results/$', views.ResultsPDF.as_view(), name='results-pdf'),
url(r'^athlete/$', views.AthleteIndex.as_view(), name='athlete-index'),
url(r'^admin/', admin.site.urls),
]
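# Illustrative: the named patterns above are reversed elsewhere with
# django.urls.reverse; the category value here is hypothetical.
#   reverse('entry-list', kwargs={'category': 'recurve'})  # -> '/recurve/entries/'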
|
{
"content_hash": "251b59ae2cda7190c5a9ce6d22904373",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 176,
"avg_line_length": 81.54166666666667,
"alnum_prop": 0.6734798160449668,
"repo_name": "mjtamlyn/back2back",
"id": "d29f6db67b3ed0479cab5465d2f24c384e925f3a",
"size": "5871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "back2back/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2168"
},
{
"name": "HTML",
"bytes": "27295"
},
{
"name": "JavaScript",
"bytes": "951"
},
{
"name": "Makefile",
"bytes": "87"
},
{
"name": "Procfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "74762"
},
{
"name": "Ruby",
"bytes": "154"
},
{
"name": "Sass",
"bytes": "1495"
},
{
"name": "TeX",
"bytes": "6421"
}
],
"symlink_target": ""
}
|
def __load():
import imp, os, sys
try:
dirname = os.path.dirname(__loader__.archive)
except NameError:
dirname = sys.prefix
path = os.path.join(dirname, 'unicodedata.pyd')
#print "py2exe extension module", __name__, "->", path
mod = imp.load_dynamic(__name__, path)
## mod.frozen = 1
__load()
del __load
|
{
"content_hash": "0e5b246eed6d9a81d1b8ea075824ff87",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 58,
"avg_line_length": 27.76923076923077,
"alnum_prop": 0.5623268698060941,
"repo_name": "hahwul/restime",
"id": "f086db19d8c746d5b8947006869bc12d67665bd5",
"size": "361",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "build/bdist.win-amd64/winexe/temp/unicodedata.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22"
},
{
"name": "Python",
"bytes": "4494"
}
],
"symlink_target": ""
}
|
import pytest
from selenium.webdriver.common.by import By
def testShouldBeAbleToDetermineTheLocationOfAnElement(driver, pages):
pages.load("xhtmlTest.html")
location = driver.find_element(By.ID, "username").location_once_scrolled_into_view
assert location["x"] > 0
assert location["y"] > 0
@pytest.mark.parametrize('page', (
'coordinates_tests/simple_page.html',
'coordinates_tests/page_with_empty_element.html',
'coordinates_tests/page_with_transparent_element.html',
'coordinates_tests/page_with_hidden_element.html'),
ids=('basic', 'empty', 'transparent', 'hidden'))
@pytest.mark.xfail_safari
def testShouldGetCoordinatesOfAnElement(page, driver, pages):
pages.load(page)
element = driver.find_element(By.ID, "box")
_check_location(element.location_once_scrolled_into_view, x=10, y=10)
_check_location(element.location, x=10, y=10)
@pytest.mark.xfail_safari
def testShouldGetCoordinatesOfAnInvisibleElement(driver, pages):
pages.load("coordinates_tests/page_with_invisible_element.html")
element = driver.find_element(By.ID, "box")
_check_location(element.location_once_scrolled_into_view, x=0, y=0)
_check_location(element.location, x=0, y=0)
def testShouldScrollPageAndGetCoordinatesOfAnElementThatIsOutOfViewPort(driver, pages):
pages.load("coordinates_tests/page_with_element_out_of_view.html")
element = driver.find_element(By.ID, "box")
windowHeight = driver.get_window_size()["height"]
_check_location(element.location_once_scrolled_into_view, x=10)
assert 0 <= element.location_once_scrolled_into_view["y"] <= (windowHeight - 100)
_check_location(element.location, x=10, y=5010)
@pytest.mark.xfail_chrome
@pytest.mark.xfail_chromiumedge
@pytest.mark.xfail_firefox
@pytest.mark.xfail_remote
@pytest.mark.xfail_safari
def testShouldGetCoordinatesOfAnElementInAFrame(driver, pages):
pages.load("coordinates_tests/element_in_frame.html")
driver.switch_to.frame(driver.find_element(By.NAME, "ifr"))
element = driver.find_element(By.ID, "box")
_check_location(element.location_once_scrolled_into_view, x=25, y=25)
_check_location(element.location, x=10, y=10)
@pytest.mark.xfail_chrome
@pytest.mark.xfail_chromiumedge
@pytest.mark.xfail_firefox
@pytest.mark.xfail_remote
@pytest.mark.xfail_safari
def testShouldGetCoordinatesOfAnElementInANestedFrame(driver, pages):
pages.load("coordinates_tests/element_in_nested_frame.html")
driver.switch_to.frame(driver.find_element(By.NAME, "ifr"))
driver.switch_to.frame(driver.find_element(By.NAME, "ifr"))
element = driver.find_element(By.ID, "box")
_check_location(element.location_once_scrolled_into_view, x=40, y=40)
_check_location(element.location, x=10, y=10)
def testShouldGetCoordinatesOfAnElementWithFixedPosition(driver, pages):
pages.load("coordinates_tests/page_with_fixed_element.html")
element = driver.find_element(By.ID, "fixed")
_check_location(element.location_once_scrolled_into_view, y=0)
_check_location(element.location, y=0)
driver.find_element(By.ID, "bottom").click()
_check_location(element.location_once_scrolled_into_view, y=0)
assert element.location["y"] > 0
def testShouldCorrectlyIdentifyThatAnElementHasWidthAndHeight(driver, pages):
pages.load("xhtmlTest.html")
shrinko = driver.find_element(By.ID, "linkId")
size = shrinko.size
assert size["width"] > 0
assert size["height"] > 0
def _check_location(location, **kwargs):
try:
# python 2.x
expected = kwargs.viewitems()
actual = location.viewitems()
except AttributeError:
# python 3.x
expected = kwargs.items()
actual = location.items()
assert expected <= actual
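# Illustrative: _check_location asserts that the expected coordinates form a
# subset of the actual location mapping, so unspecified axes are not checked.
#   _check_location({"x": 10, "y": 5010}, x=10)       # passes; y is ignored
#   _check_location({"x": 10, "y": 5010}, x=10, y=0)  # raises AssertionError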
|
{
"content_hash": "60154b13ea96ca3c4005acacd2e41b20",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 87,
"avg_line_length": 37.95959595959596,
"alnum_prop": 0.7272485364555614,
"repo_name": "Dude-X/selenium",
"id": "a6240f84989dec08313bcc15e30506be50eb66d3",
"size": "4546",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "py/test/selenium/webdriver/common/position_and_size_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "825"
},
{
"name": "Batchfile",
"bytes": "348"
},
{
"name": "C",
"bytes": "42319"
},
{
"name": "C#",
"bytes": "2794338"
},
{
"name": "C++",
"bytes": "1978395"
},
{
"name": "CSS",
"bytes": "27283"
},
{
"name": "HTML",
"bytes": "1853735"
},
{
"name": "Java",
"bytes": "4871670"
},
{
"name": "JavaScript",
"bytes": "4757113"
},
{
"name": "Makefile",
"bytes": "4655"
},
{
"name": "Python",
"bytes": "873440"
},
{
"name": "Ragel",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "912322"
},
{
"name": "Shell",
"bytes": "2859"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
}
|
from .plot_and_save import plot_and_save
from .style import CATEGORICAL_COLORS
def plot_points(
xs,
ys,
names=None,
modes=None,
texts=None,
markers=None,
textpositions=None,
layout_width=None,
layout_height=None,
title=None,
xaxis_title=None,
yaxis_title=None,
legend_orientation=None,
html_file_path=None,
plotly_html_file_path=None,
):
layout = dict(
width=layout_width,
height=layout_height,
title=title,
xaxis=dict(title=xaxis_title),
yaxis=dict(title=yaxis_title),
legend=dict(orientation=legend_orientation),
hovermode="closest",
)
data = []
for i, (x, y) in enumerate(zip(xs, ys)):
        name = None if names is None else names[i]
        mode = "markers" if modes is None else modes[i]
        text = None if texts is None else texts[i]
        marker = dict(color=CATEGORICAL_COLORS[i]) if markers is None else markers[i]
        textposition = None if textpositions is None else textpositions[i]
data.append(
dict(
type="scatter",
name=name,
x=x,
y=y,
mode=mode,
text=text,
marker=marker,
textposition=textposition,
)
)
plot_and_save(dict(layout=layout, data=data), html_file_path, plotly_html_file_path)
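# Usage sketch (illustrative; the trace data and file name are made up):
#   plot_points(
#       xs=[[0, 1, 2], [0, 1, 2]],
#       ys=[[1, 3, 2], [2, 1, 3]],
#       names=["trace a", "trace b"],
#       title="Two scatter traces",
#       html_file_path="points.html",
#   )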
|
{
"content_hash": "e714f7e5d0a76ffd2d2c337ffadb640f",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 88,
"avg_line_length": 18.477777777777778,
"alnum_prop": 0.5063138905592303,
"repo_name": "KwatME/plot",
"id": "1cf6c06f527b9b1517a6f214d5963ed90f037597",
"size": "1663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plot/plot_points.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "8718"
},
{
"name": "Python",
"bytes": "32607"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
import inspect
import logging
import sys
import os
import getpass
import six
import uuid
from collections import defaultdict
import functools
import dataset
import pandas as pd
import socket
import os.path
import yaml
from six.moves import input
try:
from traitlets import (
HasTraits, CUnicode, List, CInt, Instance, Enum,
CFloat, CBool)
except ImportError:
from IPython.utils.traitlets import (
HasTraits, CUnicode, List, CInt, Instance, Enum,
CFloat, CBool)
logger = logging.getLogger(__name__)
# Whether we are running from NERSC
ON_NERSC = 'METATLAS_LOCAL' not in os.environ
logger.info('NERSC=%s', ON_NERSC)
# Observable List from
# http://stackoverflow.com/a/13259435
def callback_method(func):
def notify(self, *args, **kwargs):
if not hasattr(self, '_callbacks'):
return func(self, *args, **kwargs)
for _, callback in self._callbacks:
callback()
return func(self, *args, **kwargs)
return notify
class NotifyList(list):
extend = callback_method(list.extend)
append = callback_method(list.append)
remove = callback_method(list.remove)
pop = callback_method(list.pop)
__delitem__ = callback_method(list.__delitem__)
__setitem__ = callback_method(list.__setitem__)
__iadd__ = callback_method(list.__iadd__)
__imul__ = callback_method(list.__imul__)
def __getitem__(self, item):
if isinstance(item, slice):
return self.__class__(list.__getitem__(self, item))
else:
return list.__getitem__(self, item)
def __init__(self, *args):
list.__init__(self, *args)
self._callbacks = []
self._callback_cntr = 0
def register_callback(self, cb):
self._callbacks.append((self._callback_cntr, cb))
self._callback_cntr += 1
return self._callback_cntr - 1
def unregister_callback(self, cbid):
for idx, (i, cb) in enumerate(self._callbacks):
if i == cbid:
self._callbacks.pop(idx)
return cb
else:
return None
def set_docstring(cls):
"""Set the docstring for a MetatlasObject object"""
doc = cls.__doc__
if not doc:
doc = cls.__name__ + ' object.'
doc += '\n\nParameters\n----------\n'
for (tname, trait) in sorted(cls.class_traits().items()):
if tname.startswith('_'):
continue
descr = trait.__class__.__name__.lower()
if descr.startswith('c'):
descr = descr[1:]
elif descr == 'enum':
descr = '{' + ', '.join(trait.values) + '}'
doc += '%s: %s\n' % (tname, descr)
        help_text = trait.help  # was: trait.get_metadata('help')
if not help_text:
help_text = '%s value.' % tname
help_text = help_text.strip()
if help_text.endswith('.'):
help_text = help_text[:-1]
if trait.metadata.get('readonly', False):
help_text += ' (read only)'
help_text += '.'
doc += ' %s\n' % help_text
cls.__doc__ = doc
return cls
def _get_subclasses(cls):
return cls.__subclasses__() + [g for s in cls.__subclasses__()
for g in _get_subclasses(s)]
class Workspace(object):
instance = None
def __init__(self):
# get metatlas directory since notebooks and scripts could be launched
# from other locations
# this directory contains the config files
metatlas_dir = os.path.dirname(sys.modules[self.__class__.__module__].__file__)
if ON_NERSC:
with open(os.path.join(metatlas_dir, 'nersc_config', 'nersc.yml')) as fid:
nersc_info = yaml.safe_load(fid)
with open(nersc_info['db_passwd_file']) as fid:
pw = fid.read().strip()
self.path = 'mysql+pymysql://meta_atlas_admin:%s@nerscdb04.nersc.gov/%s' % (pw, nersc_info['db_name'])
else:
local_config_file = os.path.join(metatlas_dir, 'local_config', 'local.yml')
if os.path.isfile(local_config_file):
with open(local_config_file) as fid:
                    local_info = yaml.safe_load(fid)
hostname = 'localhost' if 'db_hostname' not in local_info else local_info['db_hostname']
login = ''
if 'db_username' in local_info:
if 'db_password' in local_info:
login = f"{local_info['db_username']}:{local_info['db_password']}@"
else:
login = f"{local_info['db_username']}@"
self.path = f"mysql+pymysql://{login}{hostname}/{local_info['db_name']}"
else:
filename = f"{getpass.getuser()}_workspace.db"
self.path = f"sqlite:///{filename}"
if os.path.exists(filename):
os.chmod(filename, 0o775)
logger.debug('Using database at: %s', self.path)
self.tablename_lut = dict()
self.subclass_lut = dict()
from .metatlas_objects import MetatlasObject
for klass in _get_subclasses(MetatlasObject):
name = klass.__name__.lower()
self.subclass_lut[name] = klass
if name.endswith('s'):
self.subclass_lut[name + 'es'] = klass
self.tablename_lut[klass] = name + 'es'
else:
self.subclass_lut[name + 's'] = klass
self.tablename_lut[klass] = name + 's'
# handle circular references
self.seen = dict()
Workspace.instance = self
self.db = None
@classmethod
def get_instance(cls):
"""Returns a existing instance of Workspace or creates a new one"""
if Workspace.instance is None:
return Workspace()
return Workspace.instance
def get_connection(self):
"""
        Get a reusable connection to the database.
        Each activity that queries the database needs to call this function first.
"""
if self.db is None:
self.db = dataset.connect(self.path)
else:
self.db.begin()
try:
self.db.query('SELECT 1')
self.db.commit()
except Exception:
self.db.rollback()
self.db = dataset.connect(self.path)
assert self.db is not None
def close_connection(self):
"""close database connections"""
if self.db is not None:
self.db.close()
self.db = None
def convert_to_double(self, table, entry):
"""Convert a table column to double type."""
self.get_connection()
self.db.begin()
try:
self.db.query('alter table `%s` modify `%s` double' % (table, entry))
self.db.commit()
except Exception as err:
rollback_and_log(self.db, err)
def save_objects(self, objects, _override=False):
"""Save objects to the database"""
logger.debug('Entering Workspace.save_objects')
if not isinstance(objects, (list, set)):
objects = [objects]
self._seen = dict()
self._link_updates = defaultdict(list)
self._updates = defaultdict(list)
self._inserts = defaultdict(list)
for obj in objects:
self._get_save_data(obj, _override)
if self._inserts:
logger.debug('Workspace._inserts=%s', self._inserts)
if self._updates:
logger.debug('Workspace._updates=%s', self._updates)
if self._link_updates:
logger.debug('Workspace._link_updates=%s', self._link_updates)
self.get_connection()
self.db.begin()
try:
for (table_name, updates) in self._link_updates.items():
if table_name not in self.db:
continue
for (uid, prev_uid) in updates:
self.db.query('update `%s` set source_id = "%s" where source_id = "%s"' %
(table_name, prev_uid, uid))
for (table_name, updates) in self._updates.items():
if '_' not in table_name and table_name not in self.db:
self.db.create_table(table_name, primary_id='unique_id',
primary_type=self.db.types.string(32))
if 'sqlite' not in self.path:
self.fix_table(table_name)
for (uid, prev_uid) in updates:
self.db.query('update `%s` set unique_id = "%s" where unique_id = "%s"' %
(table_name, prev_uid, uid))
for (table_name, inserts) in self._inserts.items():
if '_' not in table_name and table_name not in self.db:
self.db.create_table(table_name, primary_id='unique_id',
primary_type=self.db.types.string(32))
if 'sqlite' not in self.path:
self.fix_table(table_name)
self.db[table_name].insert_many(inserts)
logger.debug('inserting %s', inserts)
self.db.commit()
except Exception as err:
rollback_and_log(self.db, err)
def create_link_tables(self, klass):
"""
        Create link tables in the database for the given trait klass.
"""
        name = self.tablename_lut[klass]
self.get_connection()
self.db.begin()
try:
for (tname, trait) in klass.class_traits().items():
if isinstance(trait, MetList):
table_name = '_'.join([name, tname])
if table_name not in self.db:
self.db.create_table(table_name)
link = dict(source_id=uuid.uuid4().hex,
head_id=uuid.uuid4().hex,
target_id=uuid.uuid4().hex,
target_table=uuid.uuid4().hex)
self.db[table_name].insert(link)
self.db.commit()
except Exception as err:
rollback_and_log(self.db, err)
def _get_save_data(self, obj, override=False):
"""Get the data that will be used to save an object to the database"""
if obj.unique_id in self._seen:
return
if isinstance(obj, Stub):
return
name = self.tablename_lut[obj.__class__]
self._seen[obj.unique_id] = True
changed, prev_uid = obj._update(override)
state = dict()
for (tname, trait) in obj.traits().items():
if tname.startswith('_'):
continue
if isinstance(trait, List):
# handle a list of objects by using a Link table
# create the link table if necessary
table_name = '_'.join([name, tname])
if changed and prev_uid:
self._link_updates[table_name].append((obj.unique_id,
obj.prev_uid))
value = getattr(obj, tname)
# do not store this entry in our own table
if not value:
continue
# create an entry in the table for each item
# store the item in its own table
for subvalue in value:
self._get_save_data(subvalue, override)
link = dict(source_id=obj.unique_id,
head_id=obj.head_id,
target_id=subvalue.unique_id,
target_table=subvalue.__class__.__name__.lower() + 's')
if changed:
self._inserts[table_name].append(link)
elif isinstance(trait, MetInstance):
value = getattr(obj, tname)
# handle a sub-object
                # if it is not assigned, use an empty unique_id
if value is None:
state[tname] = ''
# otherwise, store the uid and allow the object to store
# itself
else:
state[tname] = value.unique_id
self._get_save_data(value, override)
elif changed:
value = getattr(obj, tname)
# store the raw value in this table
state[tname] = value
if prev_uid and changed:
self._updates[name].append((obj.unique_id, obj.prev_uid))
else:
state['prev_uid'] = ''
if changed:
self._inserts[name].append(state)
def fix_table(self, table_name):
"""Fix a table by converting floating point values to doubles"""
klass = self.subclass_lut.get(table_name, None)
if not klass:
return
table_name = self.tablename_lut[klass]
for (tname, trait) in klass.class_traits().items():
if isinstance(trait, MetFloat):
self.convert_to_double(table_name, tname)
def retrieve(self, object_type, **kwargs):
"""Retrieve an object from the database."""
object_type = object_type.lower()
klass = self.subclass_lut.get(object_type, None)
items = []
self.get_connection()
self.db.begin()
try:
if object_type not in self.db:
if not klass:
raise ValueError('Unknown object type: %s' % object_type)
object_type = self.tablename_lut[klass]
if '_' not in object_type:
if kwargs.get('username', '') in ['*', 'all']:
kwargs.pop('username')
else:
kwargs.setdefault('username', getpass.getuser())
# Example query if group id is given
# SELECT *
# FROM tablename
# WHERE (city = 'New York' AND name like 'IBM%')
# Example query where unique id and group id are not given
# (to avoid getting all versions of the same object)
# http://stackoverflow.com/a/12102288
# SELECT *
# from (SELECT * from `groups`
# WHERE (name='spam') ORDER BY last_modified)
# x GROUP BY head_id
query = 'select * from `%s` where (' % object_type
clauses = []
for (key, value) in kwargs.items():
if isinstance(value, list) and len(value) > 0:
clauses.append('%s in ("%s")' % (key, '", "'.join(value)))
elif not isinstance(value, six.string_types):
clauses.append("%s = %s" % (key, value))
elif '%%' in value:
clauses.append('%s = "%s"' % (key, value.replace('%%', '%')))
elif '%' in value:
clauses.append('%s like "%s"' % (key, value.replace('*', '%')))
else:
clauses.append('%s = "%s"' % (key, value))
query += ' and '.join(clauses) + ')'
if not clauses:
query = query.replace(' where ()', '')
try:
items = list(self.db.query(query))
except Exception as err:
if 'Unknown column' in str(err):
keys = [k for k in klass.class_traits().keys()
if not k.startswith('_')]
raise ValueError('Invalid column name, valid columns: %s' % keys) from err
raise err
items = [klass(**i) for i in items]
uids = [i.unique_id for i in items]
if not items:
return []
# get stubs for each of the list items
for (tname, trait) in items[0].traits().items():
if isinstance(trait, List):
table_name = '_'.join([object_type, tname])
if table_name not in self.db:
for i in items:
setattr(i, tname, [])
continue
querystr = 'select * from `%s` where source_id in ("' % table_name
querystr += '" , "'.join(uids)
result = self.db.query(querystr + '")')
sublist = defaultdict(list)
for r in result:
stub = Stub(unique_id=r['target_id'],
object_type=r['target_table'])
sublist[r['source_id']].append(stub)
for i in items:
setattr(i, tname, sublist[i.unique_id])
elif isinstance(trait, MetInstance):
pass
for i in items:
if not i.prev_uid:
i.prev_uid = 'origin'
i._changed = False
items.sort(key=lambda x: x.last_modified)
self.db.commit()
except Exception as err:
rollback_and_log(self.db, err)
return items
def remove(self, object_type, **kwargs):
"""Remove an object from the database"""
override = kwargs.pop('_override', False)
if not override:
msg = 'Are you sure you want to delete the entries? (Y/N)'
            ans = input(msg)
if not ans[0].lower().startswith('y'):
print('Aborting')
return
object_type = object_type.lower()
klass = self.subclass_lut.get(object_type, None)
if not klass:
raise ValueError('Unknown object type: %s' % object_type)
object_type = self.tablename_lut[klass]
kwargs.setdefault('username', getpass.getuser())
# Example query:
# DELETE *
# FROM tablename
# WHERE (city = 'New York' AND name like 'IBM%')
query = 'delete from `%s` where (' % object_type
clauses = []
for (key, value) in kwargs.items():
if not isinstance(value, six.string_types):
clauses.append("%s = %s" % (key, value))
continue
if '%%' in value:
clauses.append('%s = "%s"' % (key, value.replace('%%', '%')))
elif '%' in value:
clauses.append('%s like "%s"' % (key, value.replace('*', '%')))
else:
clauses.append('%s = "%s"' % (key, value))
query += ' and '.join(clauses)
query += ')'
if not clauses:
query = query.replace(' where ()', '')
self.get_connection()
self.db.begin()
try:
# check for lists items that need removal
if any([isinstance(i, MetList) for i in klass.class_traits().values()]):
uid_query = query.replace('delete ', 'select unique_id ')
uids = [i['unique_id'] for i in self.db.query(uid_query)]
sub_query = 'delete from `%s` where source_id in ("%s")'
for (tname, trait) in klass.class_traits().items():
table_name = '%s_%s' % (object_type, tname)
if not uids or table_name not in self.db:
continue
if isinstance(trait, MetList):
table_query = sub_query % (table_name, '", "'.join(uids))
try:
self.db.query(table_query)
except Exception as e:
print(e)
try:
self.db.query(query)
except Exception as e:
if 'Unknown column' in str(e):
keys = [k for k in klass.class_traits().keys()
if not k.startswith('_')]
                    raise ValueError('Invalid column name, valid columns: %s' % keys) from e
else:
raise e
print('Removed')
self.db.commit()
except Exception as err:
rollback_and_log(self.db, err)
def remove_objects(self, objects, all_versions=True, **kwargs):
"""Remove a list of objects from the database."""
if not isinstance(objects, (list, set)):
objects = [objects]
if not objects:
print('No objects selected')
return
override = kwargs.pop('_override', False)
if not override:
msg = ('Are you sure you want to delete the %s object(s)? (Y/N)'
% len(objects))
            ans = input(msg)
if not ans[0].lower().startswith('y'):
print('Aborting')
return
ids = defaultdict(list)
username = getpass.getuser()
attr = 'head_id' if all_versions else 'unique_id'
self.get_connection()
self.db.begin()
try:
for obj in objects:
if not override and obj.username != username:
continue
name = self.tablename_lut[obj.__class__]
ids[name].append(getattr(obj, attr))
# remove list items as well
for (tname, trait) in obj.traits().items():
if isinstance(trait, MetList):
subname = '%s_%s' % (name, tname)
ids[subname].append(getattr(obj, attr))
for (table_name, uids) in ids.items():
if table_name not in self.db:
continue
query = 'delete from `%s` where %s in ("'
query = query % (table_name, attr)
query += '" , "'.join(uids)
query += '")'
self.db.query(query)
print(('Removed %s object(s)' % len(objects)))
self.db.commit()
except Exception as err:
rollback_and_log(self.db, err)
def format_timestamp(tstamp):
"""Get a formatted representation of a timestamp."""
try:
ts = pd.Timestamp.fromtimestamp(int(tstamp))
return ts.isoformat()
except Exception:
return str(tstamp)
class MetList(List):
allow_none = True
def validate(self, obj, value):
# value = super(MetList, self).validate(obj, value)
value = super().validate(obj, value)
value = NotifyList(value)
#value.register_callback(lambda: setattr(obj, '_changed', True))
callback = functools.partial(setattr, obj, '_changed', True)
value.register_callback(callback)
return value
class MetUnicode(CUnicode):
allow_none = True
class MetFloat(CFloat):
allow_none = True
class MetInt(CInt):
allow_none = True
class MetBool(CBool):
allow_none = True
class Stub(HasTraits):
unique_id = MetUnicode()
object_type = MetUnicode()
def retrieve(self):
return Workspace.instance.retrieve(self.object_type, username='*',
unique_id=self.unique_id)[0]
def __repr__(self):
return '%s %s' % (self.object_type.capitalize(),
self.unique_id)
def __str__(self):
return str(self.unique_id)
class MetInstance(Instance):
allow_none = True
def validate(self, obj, value):
if isinstance(value, (self.klass, Stub)):
return value
elif isinstance(value, six.string_types):
if value:
return Stub(unique_id=value,
object_type=self.klass.__name__)
else:
return None
else:
self.error(obj, value)
class MetEnum(Enum):
allow_none = True
def get_from_nersc(user, relative_path):
"""Load a remote data file from NERSC to an H5 file
Parameters
----------
user : str
NERSC user account
relative_path : str
Path to file from "/project/projectdirs/metatlas/original_data/<user>/"
"""
import pexpect
from IPython.display import clear_output
cmd = 'scp -o StrictHostKeyChecking=no '
path = "/project/projectdirs/metatlas/original_data/%s/%s"
path = path % (user, relative_path)
cmd += '%s@edisongrid.nersc.gov:%s . && echo "Download Complete"'
cmd = cmd % (user, path)
print(cmd)
proc = pexpect.spawn(cmd)
proc.expect("assword:*")
    passwd = getpass.getpass()  # prompt without echoing; was eval(input())
clear_output()
proc.send(passwd)
proc.send('\r')
proc.expect('Download Complete')
proc.close()
return os.path.abspath(os.path.basename(relative_path))
def rollback_and_log(db_connection, err):
"""
inputs:
db_connection: a dataset instance in a transaction that needs to be rolled back
err: exception instance that ended the transaction
"""
caller_name = inspect.stack()[1][3]
db_connection.rollback()
logger.error("Transaction rollback within %s()", caller_name)
logger.exception(err)
raise err
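# Usage sketch (illustrative; the object type and name are hypothetical):
#   ws = Workspace.get_instance()
#   ws.save_objects(my_objects)                   # persist MetatlasObject instances
#   groups = ws.retrieve('groups', name='spam%')  # '%' acts as a SQL LIKE wildcard
#   ws.close_connection()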
|
{
"content_hash": "c8b2846ac35dc947738b17ae23c11277",
"timestamp": "",
"source": "github",
"line_count": 659,
"max_line_length": 118,
"avg_line_length": 38.12139605462823,
"alnum_prop": 0.5150067669771515,
"repo_name": "metabolite-atlas/metatlas",
"id": "88f1479c43721bee5c19c64e57b9a43e0adef49b",
"size": "25122",
"binary": false,
"copies": "1",
"ref": "refs/heads/oo_mads",
"path": "metatlas/datastructures/object_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "121821"
},
{
"name": "Makefile",
"bytes": "1333"
},
{
"name": "Python",
"bytes": "167786"
},
{
"name": "R",
"bytes": "78"
},
{
"name": "Shell",
"bytes": "2327"
}
],
"symlink_target": ""
}
|
class BERTRPCError(Exception):
    def __init__(self, msg=None, klass=None, bt=None):
        Exception.__init__(self, msg)
        if isinstance(msg, list):
            code, message = msg[0], msg[1:]
        else:
            code, message = [0, msg]
        self.code = code
        self.message = message
        self.klass = klass
        # Avoid a shared mutable default argument for the backtrace list.
        self.bt = bt if bt is not None else []
def __str__(self):
details = []
if self.bt is not None and len(self.bt) > 0:
details.append('Traceback:\n%s\n' % ('\n'.join(self.bt)))
if self.klass is not None:
details.append('Class: %s\n' % self.klass)
if self.code is not None:
details.append('Code: %s\n' % self.code)
details.append('%s: %s' % (self.__class__.__name__, self.message))
return ''.join(details)
# override the python 2.6 DeprecationWarning re: 'message' property
def _get_message(self): return self._message
def _set_message(self, message): self._message = message
message = property(_get_message, _set_message)
class RemoteError(BERTRPCError):
pass
class ConnectionError(BERTRPCError):
pass
class ReadTimeoutError(BERTRPCError):
pass
class ProtocolError(BERTRPCError):
NO_HEADER = [0, "Unable to read length header from server."]
NO_DATA = [1, "Unable to read data from server."]
class ServerError(BERTRPCError):
pass
class UserError(BERTRPCError):
pass
class ProxyError(BERTRPCError):
pass
class InvalidRequest(BERTRPCError):
pass
class InvalidOption(BERTRPCError):
pass
|
{
"content_hash": "bf0de18b8d7559346eb1c65f9854290e",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 74,
"avg_line_length": 24.71875,
"alnum_prop": 0.6030341340075853,
"repo_name": "mjrusso/python-bertrpc",
"id": "870b5063168b205f1d7d8ce42dd933e9ed8cf16a",
"size": "1582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bertrpc/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10546"
}
],
"symlink_target": ""
}
|
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
        if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
            PARAMETERS.device, PARAMETERS.user)
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex+1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
    # Return True on success so callers can use "if not doRemoteCopy(...)".
    if return_code != 0:
        return False
    return True
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(PKG_NAME + ".wgt"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
for item in glob.glob("%s/*" % SCRIPT_DIR):
if item.endswith(".wgt"):
continue
elif item.endswith("inst.py"):
continue
else:
item_name = os.path.basename(item)
if not doRemoteCopy(item, "%s/%s" % (PKG_SRC_DIR, item_name)):
#if not doRemoteCopy(item, PKG_SRC_DIR):
action_status = False
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
                if line.find("\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
    if re_code == 0:
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
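# Example invocations (hypothetical device ID; -m defaults to SDB and
# -a defaults to the 'app' user):
#
#     python inst.py -i                          # install the .wgt package
#     python inst.py -u                          # uninstall it again
#     python inst.py -m SSH -s 192.168.1.10 -i   # install over SSH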
|
{
"content_hash": "09f5b0bef9cb8ed6500f4c9d1706ea66",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 106,
"avg_line_length": 30.398190045248867,
"alnum_prop": 0.5410836558499553,
"repo_name": "jacky-young/crosswalk-test-suite",
"id": "cc08fdccbf8c65dee48f57d187406630b990d1fc",
"size": "6741",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "usecase/usecase-wrt-tizen-tests/inst.wgt.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "820234"
},
{
"name": "CoffeeScript",
"bytes": "18978"
},
{
"name": "Cucumber",
"bytes": "65825"
},
{
"name": "GLSL",
"bytes": "3495"
},
{
"name": "Groff",
"bytes": "12"
},
{
"name": "HTML",
"bytes": "39686628"
},
{
"name": "Java",
"bytes": "284601"
},
{
"name": "JavaScript",
"bytes": "17553033"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "PHP",
"bytes": "44946"
},
{
"name": "Python",
"bytes": "4264875"
},
{
"name": "Shell",
"bytes": "1097373"
},
{
"name": "XSLT",
"bytes": "767778"
}
],
"symlink_target": ""
}
|
from io import StringIO
from skbio import TreeNode
from qiime2.plugin import SemanticType
from qiime2.plugin.util import transform
from ..plugin_setup import plugin
from . import NewickDirectoryFormat
Phylogeny = SemanticType('Phylogeny', field_names=['type'])
Rooted = SemanticType('Rooted', variant_of=Phylogeny.field['type'])
Unrooted = SemanticType('Unrooted', variant_of=Phylogeny.field['type'])
Hierarchy = SemanticType('Hierarchy')
plugin.register_semantic_types(Phylogeny, Rooted, Unrooted, Hierarchy)
plugin.register_artifact_class(
Phylogeny[Unrooted], NewickDirectoryFormat,
"A phylogenetic tree not containing a defined root.")
# Phylogeny[Rooted] import usage example
def phylogeny_rooted_usage(use):
def factory():
from q2_types.tree import NewickFormat
tree = TreeNode.read(StringIO(
'(SEQUENCE1:0.000000003,SEQUENCE2:0.000000003);'))
ff = transform(tree, to_type=NewickFormat)
ff.validate()
return ff
to_import = use.init_format('my-tree', factory, ext='.tre')
use.import_from_format('tree',
semantic_type='Phylogeny[Rooted]',
variable=to_import,
view_type='NewickFormat')
plugin.register_artifact_class(
Phylogeny[Rooted], NewickDirectoryFormat,
"A phylogenetic tree containing a defined root.",
examples={'Import rooted phylogenetic tree': phylogeny_rooted_usage})
plugin.register_semantic_type_to_format(Hierarchy,
directory_format=NewickDirectoryFormat)
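# A minimal import sketch (assumes a QIIME 2 deployment with this plugin
# registered; 'tree.nwk' is a hypothetical Newick file on disk):
#
#     import qiime2
#     artifact = qiime2.Artifact.import_data('Phylogeny[Rooted]', 'tree.nwk')
#     artifact.save('rooted-tree.qza')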
|
{
"content_hash": "ddb3acda5c6f33ccfb08106e5cfcb255",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 31.294117647058822,
"alnum_prop": 0.6854636591478697,
"repo_name": "qiime2/q2-types",
"id": "0ab94e82aa915089ca09e7d66de0b79aef684204",
"size": "1946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "q2_types/tree/_type.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "269"
},
{
"name": "Python",
"bytes": "418135"
},
{
"name": "TeX",
"bytes": "1121"
}
],
"symlink_target": ""
}
|
"""
A module for all environmental layers functionality.
.. moduleauthor:: Daniela Remenska <remenska@gmail.com>
"""
import logging
from enum import Enum
import pprint
from rasterio.warp import calculate_default_transform, RESAMPLING
# from iSDM.species import IUCNSpecies
import numpy as np
from geopandas import GeoSeries, GeoDataFrame
import rasterio
from rasterio.transform import Affine
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import pandas as pd
from shapely.geometry import Point
from matplotlib.collections import PatchCollection
from descartes import PolygonPatch
import shapely
from rasterio import features
from shapely.geometry import Polygon
import gc
logger = logging.getLogger('iSDM.environment')
logger.setLevel(logging.DEBUG)
class Source(Enum):
"""
Possible sources of global environmental data.
"""
WORLDCLIM = 1
GLOBE = 2
UNKNOWN = 3
TNC = 4
ARCGIS = 5
WWL = 6
class EnvironmentalLayer(object):
"""
EnvironmentalLayer
A generic EnvironmentalLayer class used for subclassing different global-scale environmental data sources.
:ivar source: The source of the global environmental data.
:vartype source: iSDM.environment.Source
:ivar name_layer: Description of the layer
:vartype name_layer: string
"""
def __init__(self, source=None, file_path=None, name_layer=None, **kwargs):
if source:
if not isinstance(source, Source):
raise AttributeError("The source can only be one of the following: %s " % list(Source.__members__))
self.source = source
else:
self.source = Source.UNKNOWN
if file_path:
self.file_path = file_path
self.name_layer = name_layer
def load_data(self, file_path=None):
"""
Needs to be implemented in a subclass.
"""
raise NotImplementedError("You need to implement this method in a subclass!")
def set_source(self, source):
if not isinstance(source, Source):
raise AttributeError("The source can only be one of the following: %s " % list(Source.__members__))
self.source = source
def get_source(self):
return self.source.name
def save_data(self, full_name=None, dir_name=None, file_name=None):
"""
Needs to be implemented in a subclass.
"""
raise NotImplementedError("You need to implement this method in a subclass!")
def get_data(self):
"""
Needs to be implemented in a subclass.
"""
raise NotImplementedError("You need to implement this method in a subclass!")
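# A minimal sketch (hypothetical helper, not part of iSDM): the Source enum
# gates the constructor, and concrete subclasses are expected to override
# load_data()/get_data()/save_data().
def _source_demo():
    layer = EnvironmentalLayer(source=Source.WORLDCLIM, name_layer="bio1")
    return layer.get_source()  # 'WORLDCLIM'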
class RasterEnvironmentalLayer(EnvironmentalLayer):
"""
RasterEnvironmentalLayer
A class for encapsulating the raster environmental layer functionality. Operations such as reprojecting,
overlaying, sampling pseudo-absence pixels, converting to world map coordinates, are some of the functionalities
implemented as wrappers around corresponding rasterio/Numpy operations and methods.
This class should be used when the expected layer data is in raster format, i.e., 2-dimensional (multi-band) array of data.
:ivar file_path: Location of the raster file from which the raster map data is loaded.
:vartype file_path: string
:ivar raster_affine: Affine translation used in the environmental raster map.
:vartype raster_affine: rasterio.transform.Affine
:ivar resolution: The resolution of the raster map, as a tuple (height, width) in pixels.
:vartype resolution: tuple(int, int)
:ivar raster_reader: file reader for the corresponding rasterized data.
:vartype raster_reader: rasterio._io.RasterReader
"""
def __init__(self, source=None, file_path=None, name_layer=None, **kwargs):
EnvironmentalLayer.__init__(self, source, file_path, name_layer, **kwargs)
def load_data(self, file_path=None):
"""
Loads the raster data from a previously-saved raster file. Provides information about the
loaded data, and returns a rasterio file reader handle, which allows you to read individual raster bands.
        :param string file_path: The full path to the target GeoTIFF raster file (including the directory and filename in one string).
:returns: Rasterio RasterReader file object which can be used to read individual bands from the raster file.
:rtype: rasterio._io.RasterReader
"""
if file_path:
self.file_path = file_path
if not self.file_path:
raise AttributeError("Please provide a file_path to read raster environment data from.")
src = rasterio.open(self.file_path)
logger.info("Loaded raster data from %s " % self.file_path)
logger.info("Driver name: %s " % src.driver)
pp = pprint.PrettyPrinter(depth=5)
self.metadata = src.meta
logger.info("Metadata: %s " % pp.pformat(self.metadata))
logger.info("Resolution: x_res={0} y_res={1}.".format(src.width, src.height))
logger.info("Bounds: %s " % (src.bounds,))
logger.info("Coordinate reference system: %s " % src.crs)
logger.info("Affine transformation: %s " % (src.affine.to_gdal(),))
logger.info("Number of layers: %s " % src.count)
logger.info("Dataset loaded. Use .read() or .read_masks() to access the layers.")
self.raster_affine = src.affine
self.resolution = src.res
self.bounds = src.bounds
self.raster_reader = src
return self.raster_reader
def pixel_to_world_coordinates(self,
raster_data=None,
filter_no_data_value=True,
no_data_value=0,
band_number=1):
"""
Map the pixel coordinates to world coordinates. The affine transformation matrix is used for this purpose.
The convention is to reference the pixel corner. To reference the pixel center instead, we translate each pixel by 50%.
The "no value" pixels (cells) can be filtered out.
A dataset's pixel coordinate system has its origin at the "upper left" (imagine it displayed on your screen).
Column index increases to the right, and row index increases downward. The mapping of these coordinates to
"world" coordinates in the dataset's reference system is done with an affine transformation matrix.
        :param string raster_data: the raster data (2-dimensional array) to translate to world coordinates. If not provided, \
        it tries to load existing rasterized data about the RasterEnvironmentalLayer.
:param int no_data_value: The pixel values depicting non-burned cells. Default is 0.
:param bool filter_no_data_value: Whether to filter-out the no-data pixel values. Default is true. If set to \
false, all pixels in a 2-dimensional array will be converted to world coordinates. Typically this option is used \
to get a "base" map of the coordinates of all pixels in an image (map).
:returns: A tuple of numpy ndarrays. The first array contains the latitude values for each \
non-zero cell, the second array contains the longitude values for each non-zero cell.
TODO: document raster-affine
:rtype: tuple(np.ndarray, np.ndarray)
"""
if raster_data is None:
logger.info("No raster data provided, attempting to load default...")
try:
raster_data = self.load_data(self.file_path).read(band_number) # we work on one layer, the first
logger.info("Succesfully loaded existing raster data from %s." % self.file_path)
except AttributeError as e:
logger.error("Could not open raster file. %s " % str(e))
raise AttributeError(e)
logger.info("Transforming to world coordinates...")
# first get the original Affine transformation matrix
if hasattr(self, "raster_affine"):
T0 = self.raster_affine
else: # default from arguments
# try to deduce it
logger.info("No Affine translation defined for this layer, trying to deduce it.")
x_min, y_min, x_max, y_max = -180, -90, 180, 90
pixel_size = (x_max - x_min) / raster_data.shape[1]
if pixel_size != (y_max - y_min) / raster_data.shape[0]:
logger.error("Could not deduce Affine transformation...possibly the pixel is not a square.")
return
T0 = Affine(pixel_size, 0.0, x_min, 0.0, -pixel_size, y_max)
# convert it to gdal format (it is otherwise flipped)
T0 = Affine(*reversed(T0.to_gdal()))
logger.debug("Affine transformation T0:\n %s " % (T0,))
# shift by 50% to get the pixel center
T1 = T0 * Affine.translation(0.5, 0.5)
# apply the shift, filtering out no_data_value values
logger.debug("Raster data shape: %s " % (raster_data.shape,))
logger.debug("Affine transformation T1:\n %s " % (T1,))
if filter_no_data_value:
logger.info("Filtering out no_data pixels.")
raster_data = np.where(raster_data != no_data_value, raster_data, np.nan)
coordinates = (T1 * np.where(~np.isnan(raster_data)))
else:
coordinates = (T1 * np.where(raster_data))
logger.info("Transformation to world coordinates completed.")
return coordinates
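    # A minimal sketch (hypothetical helper, not part of the original API):
    # how the affine transform above maps a pixel index to world coordinates,
    # assuming a global grid of 0.5-degree square pixels.
    @staticmethod
    def _affine_demo():
        pixel_size = 0.5
        T0 = Affine(pixel_size, 0.0, -180, 0.0, -pixel_size, 90)
        T1 = T0 * Affine.translation(0.5, 0.5)  # shift to the pixel center
        lon, lat = T1 * (0, 0)  # column 0, row 0: the upper-left cell
        return lon, lat  # (-179.75, 89.75)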
@classmethod
def __geometrize__(cls,
data,
latitude_col_name='decimallatitude',
longitude_col_name='decimallongitude',
crs=None,
dropna=True):
"""
Private helper class function.
Converts data from pandas.DataFrame contents to geopandas.GeoDataFrame format.
GeoDataFrames inherit basic DataFrames, and provide more functionality on top of pandas.
The biggest difference in terms of the data layout is the addition of a 'geometry' column which contains
`Shapely <http://toblerity.org/shapely/shapely.geometry.html>`_ geometries in `geopandas <http://geopandas.org/user.html>`_.
The decimallatitude and decimallongitude columns are converted into shapely Point geometry, one Point for each latitude/longitude
record.
:param bool dropna: Whether to drop records with NaN values in the decimallatitude or decimallongitude columns in the conversion process.
:param string longitude_col_name: The name of the column carrying the decimal longitude values. Default is 'decimallongitude'.
:param string latitude_col_name: The name of the column carrying the decimal latitude values. Default is 'decimallatitude'
:param crs: The Coordinate Reference System of the data. Default is "EPSG:4326"
:type crs: string or dictionary.
:returns: geopandas.GeoDataFrame
"""
        if not isinstance(data, pd.DataFrame) or data.empty:
            logger.error("Please provide the data parameter as a non-empty pandas.DataFrame.")
            return
try:
if crs is None:
crs = {'init': "EPSG:4326"}
# exclude those points with NaN in plot_world_coordinates
            if dropna:
                # drop incomplete rows first, so the geometry stays aligned
                # with the remaining records
                data_clean = data.dropna(
                    subset=[latitude_col_name, longitude_col_name])
                geometry = [Point(xy) for xy in zip(
                    data_clean[longitude_col_name],
                    data_clean[latitude_col_name]
                )]
                data_geo = GeoDataFrame(data_clean, crs=crs, geometry=geometry)
                logger.info("Data geometrized: converted into GeoPandas dataframe.")
                logger.info("Points with NaN coordinates ignored. ")
else:
geometry = [Point(xy) for xy in zip(
data[longitude_col_name],
data[latitude_col_name]
)]
data_geo = GeoDataFrame(data, crs=crs, geometry=geometry)
logger.info("Data geometrized: converted into GeoPandas dataframe.")
except AttributeError:
logger.error("No latitude/longitude data to convert into a geometry. Please load the data first.")
return data_geo
def polygonize(self, band_number=1):
"""
Extract shapes from raster features. This is the inverse operation of rasterizing shapes.
Uses the `Rasterio <https://mapbox.github.io/rasterio/_modules/rasterio/features.html>'_ library
for this purpose. The data is loaded into a `geopandas <http://geopandas.org/user.html>`_ GeoDataFrame.
GeoDataFrame data structures are pandas DataFrames with added functionality, containing a ``geometry``
column for the `Shapely <http://toblerity.org/shapely/shapely.geometry.html>`_ geometries.
The raster data should be loaded in the layer before calling this method.
:param int band_number: The index of the raster band which is to be used as input for extracting \
gemetrical shapes.
:returns: geopandas.GeoDataFrame
"""
raster_data = self.read(band_number)
mask = raster_data != self.raster_reader.nodata
T0 = self.raster_reader.affine
shapes = features.shapes(raster_data, mask=mask, transform=T0)
df = GeoDataFrame.from_records(shapes, columns=['geometry', 'value'])
# convert the geometry dictionary from a dictionary format like {'coordinates': [[(-73.5, 83.5),
# (-73.5, 83.0),
# (-68.0, 83.0),
# (-68.0, 83.5),
# (-73.5, 83.5)]],
# 'type': 'Polygon'}
# to a proper shapely polygon format
df.geometry = df.geometry.apply(lambda row: Polygon(row['coordinates'][0]))
df.crs = self.raster_reader.crs
return df
@classmethod
def plot_world_coordinates(cls,
coordinates=None,
figsize=(16, 12),
projection='merc',
facecolor='crimson'):
"""
Visually plots coordinates on a `Basemap <http://matplotlib.org/basemap/api/basemap_api.html#module-mpl_toolkits.basemap>`_.
Basemap supports projections (with coastlines and political boundaries) using matplotlib.
The coordinates data must be provided as a tuple of Numpy arrays, one for the x, and one for the y values of the coordinates.
First, the data is converted to a ``pandas.DataFrame`` with the x and y arrays transposed as ``decimallatitude`` and ``decimallongitude``
columns.
        Next, the :func:`__geometrize__` method is used to convert the dataframe into a geopandas format (with a ``geometry`` column).
:param tuple coordinates: A tuple containing Numpy arrays, one for the x, and one for the y values of the coordinates.
:param tuple figsize: A tuple containing the (width, height) of the plot, in inches. Default is (16, 12)
:param string projection: The projection to use for plotting. Supported projection values from
`Basemap <http://matplotlib.org/basemap/api/basemap_api.html#module-mpl_toolkits.basemap>`_. Default is 'merc' (Mercator)
        :param string facecolor: Fill color for the geometries. Default is "crimson" (red)
:returns: a map with geometries plotted, zoomed to the total boundaries of the geometry Series (column) of the DataFrame.
"""
if coordinates is None or not isinstance(coordinates, tuple) \
or not isinstance(coordinates[0], np.ndarray) \
or not isinstance(coordinates[1], np.ndarray):
logger.error("Please provide the coordinates to plot, in the correct format.")
logger.error("Use pixel_to_world_coordinates() for this.")
return
# empty dataset
if coordinates[0].shape[0] == 0:
logger.error("No data to plot.")
return
data = pd.DataFrame([coordinates[0], coordinates[1]]).T
data.columns = ['decimallatitude', 'decimallongitude']
data_geometrized = cls.__geometrize__(data)
mm = Basemap(projection=projection, lat_0=50, lon_0=-100,
resolution='l', area_thresh=1000.0,
llcrnrlon=data_geometrized.geometry.total_bounds[0], # lower left corner longitude point
llcrnrlat=data_geometrized.geometry.total_bounds[1], # lower left corner latitude point
urcrnrlon=data_geometrized.geometry.total_bounds[2], # upper right longitude point
urcrnrlat=data_geometrized.geometry.total_bounds[3] # upper right latitude point
)
# prepare longitude/latitude list for basemap
ax1 = plt.subplots(figsize=figsize)[1]
plt.title("World coordinates of raster data.")
mm.drawcoastlines()
mm.drawcountries()
mm.drawrivers(color='lightskyblue', linewidth=1.5)
mm.drawmapboundary(fill_color='lightskyblue')
mm.fillcontinents(color='cornsilk')
# draw latitude and longitude
mm.drawmeridians(np.arange(-180, 180, 10), labels=[False, False, False, True])
mm.drawparallels(np.arange(-180, 180, 10), labels=[True, True, False, False])
patches = []
selection = data_geometrized
for poly in selection.geometry:
if poly.geom_type == 'Polygon':
mpoly = shapely.ops.transform(mm, poly)
patches.append(PolygonPatch(mpoly))
elif poly.geom_type == 'MultiPolygon':
for subpoly in poly:
mpoly = shapely.ops.transform(mm, subpoly)
patches.append(PolygonPatch(mpoly))
elif poly.geom_type == "Point":
patches.append(PolygonPatch(Point(mm(poly.x, poly.y)).buffer(9999))) # TODO: this buffer thing is tricky around the edges of a map
else:
logger.warning("Geometry type %s not supported. Skipping ... " % poly.geom_type)
continue
ax1.add_collection(PatchCollection(patches, facecolor=facecolor, match_original=True, zorder=100))
plt.show()
def plot(self, figsize=(25, 20), band_number=1):
"""
A simple plot of the raster image data. The data should be loaded before calling this method.
:param tuple figsize: A tuple containing the (width, height) of the plot, in inches. Default is (25, 20)
:param int band_number: The index of the band to use for plotting the raster data.
"""
if not self.raster_reader or self.raster_reader.closed:
logger.info("The dataset is closed. Please load it first using .load_data()")
return
plt.figure(figsize=figsize)
plt.title(self.name_layer)
plt.imshow(self.read(band_number), cmap="flag", interpolation="none")
def close_dataset(self):
"""
Close the ``rasterio._io.RasterReader`` file reader, if open. This releases resources such as memory.
"""
if not self.raster_reader.closed:
self.raster_reader.close()
logger.info("Dataset %s closed. " % self.file_path)
def get_data(self):
"""
:returns: A raster file reader, from which any band data can be read using .read(band_number)
:rtype: rasterio._io.RasterReader
"""
if not self.raster_reader or self.raster_reader.closed:
logger.info("The dataset is closed. Please load it first using .load_data()")
return
return self.raster_reader
def read(self, band_number=1):
"""
Read a particular band from the raster data array.
:param int band_number: The index of the band to read.
:returns: A 2-dimensional Numpy array containing the pixel values of that particular band.
"""
if not hasattr(self, 'raster_reader'):
raise AttributeError("Please load the data first, using .load_data()")
if not self.raster_reader or self.raster_reader.closed:
logger.info("The dataset is closed. Please load it first using .load_data()")
return
return self.raster_reader.read(band_number)
def reproject(self, destination_file, source_file=None, resampling=RESAMPLING.nearest, **kwargs):
"""
Reprojects the pixels of a source raster map to a destination raster, with a different reference coordinate
system and Affine transform. It uses `Rasterio <https://github.com/mapbox/rasterio/blob/master/docs/reproject.rst>`_
calculate_default_transform() to calculate parameters such as the resolution (if not provided), and the destination
transform and dimensions.
        :param string source_file: Full path to the source file containing a raster map
        :param string destination_file: Full path to the destination file containing a raster map
:param int resampling: Resampling method to use. Can be one of the following: ``Resampling.nearest``, ``Resampling.bilinear``, \
``Resampling.cubic``, ``Resampling.cubic_spline``, ``Resampling.lanczos``, ``Resampling.average``, ``Resampling.mode``.
:param dict kwargs: Optional additional arguments passed to the method, to parametrize the reprojection. \
For example: :attr:`dst_crs` for the target coordinate reference system, :attr:`resolution` for the target resolution, \
in units of target coordinate reference system.
"""
if not source_file:
if not self.file_path:
raise AttributeError("Please provide a source_file to load the data from.")
else:
source_file = self.file_path
with rasterio.open(source_file) as src:
affine, width, height = calculate_default_transform(src_crs=src.crs,
dst_crs=kwargs.get('dst_crs', src.crs),
width=kwargs.get('width', src.width),
height=kwargs.get('height', src.height),
left=kwargs.get('left', src.bounds.left),
bottom=kwargs.get('bottom', src.bounds.bottom),
right=kwargs.get('right', src.bounds.right),
top=kwargs.get('top', src.bounds.top),
resolution=kwargs.get('resolution', src.res)
)
logger.info("Calculated default transformation:")
logger.info("Affine:\n{0} \n width={1}, height={2}".format(affine, width, height))
            # use a separate dict for the output profile, so the caller's
            # kwargs (e.g. dst_crs) are not clobbered
            dst_meta = src.meta.copy()
            dst_meta.update({'transform': affine,
                             'affine': affine,
                             'width': width,
                             'height': height,
                             'crs': kwargs.get('dst_crs', src.crs)
                             })
            with rasterio.open(destination_file, 'w', **dst_meta) as dst:
for i in range(1, src.count + 1):
rasterio.warp.reproject(source=rasterio.band(src, i),
destination=rasterio.band(dst, i),
src_transform=src.affine,
src_crs=src.crs,
dst_transform=affine,
dst_crs=kwargs.get('dst_crs', src.crs),
resampling=resampling
)
logger.info("Reprojected data in %s " % destination_file)
# def overlay(self, range_map):
# """
# Extract mask from a raster map, where it intersects with a vector feature, like a polygon.
# rasterio provides option to "burn" vector shapes into rasters (rasterize the geometry). Then we create
# a raster mask layer
# """
# if not (isinstance(range_map, VectorEnvironmentalLayer) or isinstance(range_map, IUCNSpecies)):
# raise AttributeError("Please provide a correct rangemap input.")
# # TODO: what about if the overlay is just a shape file?
# # TODO: what if there are multiple geometries in a shapefile? Currently it just returns the last
# # but could make a list and append
# masked_data = None
# if isinstance(range_map, VectorEnvironmentalLayer) or isinstance(range_map, IUCNSpecies):
# for geometry in range_map.get_data()['geometry']:
# if self.raster_reader.closed:
# self.load_data(self.file_path)
# with self.raster_reader as raster:
# # get pixel coordinates of the geometry's bounding box
# ul = raster.index(*geometry.bounds[0:2])
# lr = raster.index(*geometry.bounds[2:4])
# # read the subset of the data into a numpy array
# window = ((lr[0], ul[0] + 1), (ul[1], lr[1] + 1))
# data = raster.read(1, window=window)
# # create an affine transform for the subset data
# t = raster.affine
# shifted_affine = Affine(t.a, t.b, t.c + ul[1] * t.a, t.d, t.e, t.f + lr[0] * t.e)
# # rasterize the geometry
# mask = features.rasterize(
# [(geometry, 0)],
# out_shape=data.shape,
# transform=shifted_affine,
# fill=1,
# all_touched=True,
# dtype=np.uint8)
# # create a masked numpy array
# masked_data = np.ma.array(data=data, mask=mask.astype(bool))
# self.masked_data = masked_data
# logger.info("Overlayed raster climate data with the given range map.")
# logger.info("Use the .masked_data attribute to access it.")
def sample_pseudo_absences(self,
species_raster_data,
suitable_habitat=None,
bias_grid=None,
band_number=1,
number_of_pseudopoints=1000):
"""
Samples a :attr:`number_of_pseudopoints` points from the ``RasterEnvironmentalLayer`` data (raster map),
based on a given species raster map which is assumed to contain species presence points (or potential presence).
The :attr:`species_raster_data` is used to determine which distinct regions (cell values) from the entire
environmental raster map, should be taken into account for potential pseudo-absence sampling regions. In other words,
which realms or ecoregions should be taken into account.
Optionally, suitable habitat raster (with binary, 0/1s values) can be provided to further limit the area of sampling.
Finally, presence pixels are removed from this map, and the resulting pixels are used as a base for sampling
        pseudo-absences. Optionally, a bias grid can be provided to bias the "random" sampling of pseudo-absences.
If the number of such resulting pixels left is smaller than the number of requested pseudo-absence
points, all pixels are automatically taken as pseudo-absence points, and no random sampling is done.
Otherwise, :attr:`number_of_pseudopoints` pixels positions (indices) are randomly chosen at once (for speed),
rather than randomly sampling one by one until the desired number of pseudo-absences is reached.
:param np.ndarray species_raster_data: A raster map containing the species presence pixels. If not provided, \
by default the one loaded previously (if available, otherwise .load_data() should be used before) is used.
:param np.ndarray suitable_habitat: A raster map containing the species suitable habitat. It should contain only \
        values of 0 and 1, 1s depicting suitable areas and 0s unsuitable ones.
:param np.ndarray bias_grid: A raster map containing the sampling bias grid. It should contain integer values depicting \
a sampling intensity at every pixel location.
        :param int band_number: The index of the band from the environmental raster to use as input. Default is 1.
:param int number_of_pseudopoints: Number of pseudo-absence points to sample from the raster environmental layer data.
:returns: A tuple containing two raster maps, one with all potential background pixels chosen to sample from, \
and second with all the actual sampled pixels.
:rtype: tuple(np.ndarray, np.ndarray)
"""
if not (isinstance(species_raster_data, np.ndarray)) or not (set(np.unique(species_raster_data)) == set({0, 1})):
logger.error("Please provide the species raster data as a numpy array with pixel values 1 and 0 (presence/absence).")
return
if self.raster_reader.closed or not hasattr(self, 'env_raster_data'):
try:
self.env_raster_data = self.read(band_number)
logger.info("Succesfully loaded existing raster data from %s." % self.file_path)
except AttributeError as e:
logger.error("Could not open raster file. %s " % str(e))
if species_raster_data.shape != self.env_raster_data.shape:
logger.error("Please provide (global) species raster data at the same resolution as the environment")
logger.error("Environment data has the following shape %s " % (self.env_raster_data.shape, ))
return
logger.info("Sampling %s pseudo-absence points from environmental layer." % number_of_pseudopoints)
# first set to zero all pixels that have "nodata" values in the environmental raster
self.env_raster_data[self.env_raster_data == self.raster_reader.nodata] = 0
# next get all the overlapping pixels between the species raster and the environment data
presences_pixels = self.env_raster_data * species_raster_data
# what are the unique values left? (these are the distinct "regions" that need to be taken into account)
# Do NOT take into account the 0-value pixel, which we assigned to all "nodata" pixels
unique_regions = np.unique(presences_pixels[presences_pixels != 0])
if len(unique_regions) == 0:
logger.info("There are no environmental layers to sample pseudo-absences from. ")
return (np.zeros_like(presences_pixels), np.zeros_like(presences_pixels))
logger.debug("The following unique (region ID) values will be taken into account for sampling pseudo-absences")
logger.debug(unique_regions)
# add the pixels of all these regions to a layer array
regions = []
for region in unique_regions:
regions.append(np.where(self.env_raster_data == region))
# now "regions" contains a list of tuples, each tuple with separate x/y indexes (arrays thereof) of the pixels
# make an empty "base" matrix and fill it with the selected regions pixel values
selected_pixels = np.zeros_like(self.env_raster_data)
# pick out only those layers that have been selected and fill in the matrix
for layer in regions:
selected_pixels[layer] = self.env_raster_data[layer]
del regions
gc.collect()
# sample from those pixels which are in the selected raster regions, minus those of the species presences
pixels_to_sample_from = selected_pixels - presences_pixels
del presences_pixels
sampled_pixels = np.zeros_like(selected_pixels)
del selected_pixels
gc.collect()
# next: narrow the area to sample from, to the suitable habitat, if raster data is provided
if suitable_habitat is not None:
logger.info("Will limit sampling area to suitable habitat.")
# Multiplying the suitable habitat layer (with 1s and 0s) with the
# previously selected pixels, will narrow to those common for both.
pixels_to_sample_from = pixels_to_sample_from * suitable_habitat
# magic, don't touch :P
if bias_grid is not None:
logger.info("Will use the provided bias_grid for sampling.")
# common x/y coordinates where bias_grid overlaps with pixels_to_sample_from
(x, y) = np.where(pixels_to_sample_from * bias_grid > 0)
# how many we have?
number_pixels_to_sample_from_bias_grid = x.shape[0]
logger.info("There are %s nonzero pixels from bias grid to use for sampling." % number_pixels_to_sample_from_bias_grid)
if number_pixels_to_sample_from_bias_grid == 0:
logger.info("No non-zero pixels from bias grid to sample.")
if number_pixels_to_sample_from_bias_grid < number_of_pseudopoints:
logger.info("Will sample all %s nonzero pixels from bias grid." % number_pixels_to_sample_from_bias_grid)
# "random" here is not so random as we will select all of them, i.e., the entire range
random_indices = np.arange(0, number_pixels_to_sample_from_bias_grid)
for position in random_indices:
sampled_pixels[x[position]][y[position]] = pixels_to_sample_from[x[position], y[position]]
# remove already sampled pixels
pixels_to_sample_from = pixels_to_sample_from - sampled_pixels
# Subtract from number of pseudo-points left to sample
number_of_pseudopoints = number_of_pseudopoints - number_pixels_to_sample_from_bias_grid
logger.info("Number of pseudo-points left to sample after bias_grid sampling: %s " % number_of_pseudopoints)
elif number_pixels_to_sample_from_bias_grid > number_of_pseudopoints:
# hopefully this will rarely happen, as it is very compute-intensive and memory-hungry
logger.info("More pixels available to sample from bias grid, than necessary. Selecting top %s" % number_of_pseudopoints)
# make a grid of all pixels from bias_grid that overlap with the pixels_to_sample_from
pixels_to_sample_from_bias_grid = np.where(pixels_to_sample_from * bias_grid > 0, bias_grid, 0)
                # find the top number_of_pseudopoints (highest values)
flat_indices = np.argpartition(pixels_to_sample_from_bias_grid.ravel(), -number_of_pseudopoints - 1)[-number_of_pseudopoints:]
# some logic to get the indices of the matrix ...
row_indices, col_indices = np.unravel_index(flat_indices, pixels_to_sample_from_bias_grid.shape)
sampled_pixels[row_indices, col_indices] = pixels_to_sample_from[row_indices, col_indices]
del pixels_to_sample_from_bias_grid
del row_indices, col_indices, flat_indices
gc.collect()
return (pixels_to_sample_from, sampled_pixels)
# These are x/y positions of pixels to sample from. Tuple of arrays.
(x, y) = np.where(pixels_to_sample_from > 0)
number_pixels_to_sample_from = x.shape[0] # == y.shape[0], since every pixel has (x,y) position.
logger.info("There are %s pixels left to sample from..." % (number_pixels_to_sample_from))
if number_pixels_to_sample_from == 0:
logger.error("There are no pixels left to sample from. Perhaps the species raster data")
logger.error("covers the entire range from which it was intended to sample further.")
return (pixels_to_sample_from, sampled_pixels)
if number_pixels_to_sample_from < number_of_pseudopoints:
logger.warning("There are less pixels to sample from, than the desired number of pseudo-absences")
logger.warning("Will select all pixels as psedudo-absences.")
random_indices = np.arange(0, number_pixels_to_sample_from)
else:
# now randomly choose <number_of_pseudopoints> indices to fill in with pseudo absences
# random_indices = np.random.randint(0, number_pixels_to_sample_from, number_of_pseudopoints)
# Changed the above method with a different one that guarantees unique positions.
# With on average 6000 (guesstimate) pixels to sample from, the method below
# is a factor of 50 slower, but guarantees no repetitions,
# (20 microseconds vs 100 microseconds) compared to the one above.
random_indices = np.random.choice(number_pixels_to_sample_from,
number_of_pseudopoints,
replace=False)
logger.info("Filling %s random pixel positions..." % (len(random_indices)))
# fill in those indices with the pixel values of the environment layer
for position in random_indices:
sampled_pixels[x[position]][y[position]] = pixels_to_sample_from[x[position], y[position]]
logger.info("Sampled %s unique pixels as pseudo-absences." % sampled_pixels.nonzero()[0].shape[0])
del random_indices
gc.collect()
return (pixels_to_sample_from, sampled_pixels)
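# A minimal sketch (toy arrays, not real rasters): the unique background
# sampling step of sample_pseudo_absences above, i.e.
# np.random.choice(..., replace=False) over the nonzero pixel positions.
def _sampling_demo():
    pixels = np.zeros((4, 4), dtype=int)
    pixels[1:3, 1:3] = 7  # a tiny "region" to sample from
    x, y = np.where(pixels > 0)
    idx = np.random.choice(x.shape[0], 2, replace=False)
    sampled = np.zeros_like(pixels)
    sampled[x[idx], y[idx]] = pixels[x[idx], y[idx]]
    return sampled  # two distinct pixels burned in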
class VectorEnvironmentalLayer(EnvironmentalLayer):
"""
VectorEnvironmentalLayer
A class for encapsulating the vector environmental layer functionality, with operations such as rasterizing.
:ivar file_path: Full location of the shapefile containing the data for this layer.
:vartype file_path: string
:ivar data_full: Data frame containing the full data for the environmental layer geometries.
:vartype data_full: geopandas.GeoDataFrame
:ivar raster_file: Full location of the corresponding raster map data for this layer.
:vartype raster_file: string
:ivar raster_affine: Affine translation used in the corresponding raster map of this layer.
:vartype raster_affine: rasterio.transform.Affine
:ivar raster_reader: file reader for the corresponding rasterized data.
:vartype raster_reader: rasterio._io.RasterReader
"""
def __init__(self, source=None, file_path=None, name_layer=None, **kwargs):
EnvironmentalLayer.__init__(self, source, file_path, name_layer, **kwargs)
def load_data(self, file_path=None):
"""
Loads the environmental data from the provided :attr:`file_path` shapefile into a ``geopandas.GeoDataFrame``.
        A GeoDataFrame is a tabular data structure that contains a column called ``geometry`` which contains a GeoSeries of
        `Shapely <http://toblerity.org/shapely/shapely.geometry.html>`_ geometries. All other metadata column names are
        converted to lower-case, for consistency.
:param string file_path: The full path to the shapefile file (including the directory and filename in one string).
:returns: None
"""
if file_path:
self.file_path = file_path
if not self.file_path:
raise AttributeError("Please provide a file_path argument to load the data from.")
logger.info("Loading data from %s " % self.file_path)
self.data_full = GeoDataFrame.from_file(self.file_path)
self.data_full.columns = [x.lower() for x in self.data_full.columns]
logger.info("The shapefile contains data on %d environmental regions." % self.data_full.shape[0])
def save_data(self, full_name=None, driver='ESRI Shapefile', overwrite=False):
"""
        Saves the current ``geopandas.GeoDataFrame`` data in a shapefile. The data is expected to have a 'geometry'
        column, besides other metadata columns. If the full location and name of the file is not provided,
        then :attr:`overwrite` should be set to ``True`` to overwrite the existing shapefile from which the
        data was previously loaded.
        :param string full_name: The full path to the target shapefile (including the directory and filename in one string).
:param string driver: The driver to use for storing the geopandas.GeoDataFrame data into a file. Default is "ESRI Shapefile".
:param bool overwrite: Whether to overwrite the shapefile from which the data was previously loaded, if a new :attr:`file_path` is not supplied.
:returns: None
"""
if not (isinstance(self.data_full, GeoSeries) or isinstance(self.data_full, GeoDataFrame)):
raise AttributeError("The data is not of a Geometry type (GeoSeries or GeoDataFrame).",
"Please geometrize first!")
if overwrite:
full_name = self.file_path
elif not overwrite and full_name is None:
raise AttributeError("Please provide a shape_file location, or set overwrite=True")
try:
            self.data_full.to_file(full_name, driver=driver)
logger.debug("Saved data: %s " % full_name)
except AttributeError as e:
logger.error("Could not save data! %s " % str(e))
def get_data(self):
"""
        Returns the (pre)loaded layer data in a (geo)pandas DataFrame.
:returns: :attr:`self.data_full`
:rtype: geopandas.GeoDataFrame or pandas.DataFrame
"""
return self.data_full
def set_data(self, data_frame):
"""
        Set the layer data to the contents of :attr:`data_frame`. The data passed must be in a
pandas or geopandas DataFrame.
**Careful**, it overwrites the existing data!
:param pandas.DataFrame data_frame: The new data.
:returns: None
"""
        if not isinstance(data_frame, (GeoDataFrame, pd.DataFrame)):
            raise AttributeError("Data is not in a correct format! Please pass a pandas or geopandas DataFrame.")
        else:
            self.data_full = data_frame
def rasterize(self, raster_file=None,
pixel_size=None,
all_touched=False,
no_data_value=0,
default_value=1,
crs=None,
cropped=False,
classifier_column=None,
*args, **kwargs):
"""
Rasterize (burn) the environment rangemaps (geometrical shapes) into pixels (cells), i.e., a 2-dimensional image array
of type numpy ndarray. Uses the `Rasterio <https://mapbox.github.io/rasterio/_modules/rasterio/features.html>`_ library
for this purpose. All the shapes from the ``VectorEnvironmentalLayer`` object data are burned in a single *band* of the image.
Rasterio datasets can generally have one or more bands, or layers. Following the GDAL convention, these are indexed starting with 1.
        :param string raster_file: The full path to the target GeoTIFF raster file (including the directory and filename in one string).
:param int pixel_size: The size of the pixel in degrees, i.e., the resolution to use for rasterizing.
:param bool all_touched: If true, all pixels touched by geometries, will be burned in. If false, only pixels \
whose center is within the polygon or that are selected by Bresenham's line algorithm, will be burned in.
:param int no_data_value: Used as value of the pixels which are not burned in. Default is 0.
:param int default_value: Used as value of the pixels which are burned in. Default is 1.
:param crs: The Coordinate Reference System to use. Default is "ESPG:4326"
:param bool cropped: If true, the resulting pixel array (image) is cropped to the region borders, which contain \
the burned pixels (i.e., an envelope within the range). Otherwise, a "global world map" is used, i.e., the boundaries \
are set to (-180, -90, 180, 90) for the resulting array.
:returns: Rasterio RasterReader file object which can be used to read individual bands from the raster file.
:rtype: rasterio._io.RasterReader
"""
        if not (pixel_size and raster_file):
            raise AttributeError("Please provide both a pixel_size and a target raster_file.")
if not hasattr(self, 'data_full'):
raise AttributeError("You have not loaded the data.")
if crs is None:
crs = {'init': "EPSG:4326"}
# crop to the boundaries of the shape?
if cropped:
# cascaded_union_geometry = shapely.ops.cascaded_union(self.data_full.geometry)
x_min, y_min, x_max, y_max = self.data_full.geometry.total_bounds
# else global map
else:
x_min, y_min, x_max, y_max = -180, -90, 180, 90
x_res = int((x_max - x_min) / pixel_size)
y_res = int((y_max - y_min) / pixel_size)
logger.info("Will rasterize using pixel_size=%s, all_touched=%s, no_data_value=%s, fill_value=%s "
% (pixel_size, all_touched, no_data_value, default_value))
transform = Affine.translation(x_min, y_max) * Affine.scale(pixel_size, -pixel_size)
if classifier_column:
logger.info("Will rasterize using classifier: %s." % classifier_column)
classifier_categories = self.data_full[classifier_column].unique()
stacked_layers = []
for category_name in classifier_categories:
if category_name:
logger.info("Rasterizing category %s " % category_name)
result = features.rasterize(self.data_full.geometry[self.data_full[classifier_column] == category_name],
transform=transform,
out_shape=(y_res, x_res),
all_touched=all_touched,
fill=no_data_value,
default_value=default_value
)
stacked_layers.append(result)
            stacked_layers = np.stack(stacked_layers)
            # open the target file once and write every band into it;
            # reopening with mode 'w' per band would truncate the file each time
            with rasterio.open(raster_file, 'w', driver='GTiff', width=x_res, height=y_res,
                               count=stacked_layers.shape[0],
                               dtype=np.uint8,
                               nodata=no_data_value,
                               transform=transform,
                               crs=crs) as out:
                for i, band in enumerate(stacked_layers, 1):
                    out.write(band.astype(np.uint8), indexes=i)
            result_final = stacked_layers
else:
logger.info("Will rasterize everything on a single band.")
result_final = features.rasterize(self.data_full.geometry,
transform=transform,
out_shape=(y_res, x_res),
all_touched=all_touched,
fill=no_data_value,
default_value=default_value
)
with rasterio.open(raster_file, 'w', driver='GTiff', width=x_res, height=y_res,
count=1,
dtype=np.uint8,
nodata=no_data_value,
transform=transform,
crs=crs) as out:
out.write(result_final.astype(np.uint8), indexes=1)
        logger.info("RASTERIO: Data rasterized into file %s " % raster_file)
        logger.info("RASTERIO: Resolution: x_res={0} y_res={1}".format(x_res, y_res))
        self.raster_file = raster_file
        self.raster_affine = transform
        if classifier_column:
            self.stacked_layers = stacked_layers
return result_final
def load_raster_data(self, raster_file=None):
"""
Loads the raster data from a previously-saved raster file. Provides information about the
loaded data, and returns a rasterio file reader.
        :param string raster_file: The full path to the target GeoTIFF raster file (including the directory and filename in one string).
:returns: Rasterio RasterReader file object which can be used to read individual bands from the raster file.
:rtype: rasterio._io.RasterReader
"""
if raster_file:
self.raster_file = raster_file
if not self.raster_file:
raise AttributeError("Please provide a raster_file to read raster data from.")
src = rasterio.open(self.raster_file)
logger.info("Loaded raster data from %s " % self.raster_file)
logger.info("Driver name: %s " % src.driver)
pp = pprint.PrettyPrinter(depth=5)
self.metadata = src.meta
logger.info("Metadata: %s " % pp.pformat(self.metadata))
logger.info("Resolution: x_res={0} y_res={1}.".format(src.width, src.height))
logger.info("Bounds: %s " % (src.bounds,))
logger.info("Coordinate reference system: %s " % src.crs)
logger.info("Affine transformation: %s " % (src.affine.to_gdal(),))
logger.info("Number of layers: %s " % src.count)
logger.info("Dataset loaded. Use .read() or .read_masks() to access the layers.")
self.raster_affine = src.affine
self.raster_reader = src
return self.raster_reader
def set_classifier(self, classifier_column):
self.classifier_column = classifier_column
def get_classifier(self):
return self.classifier_column
def set_raster_file(self, raster_file):
self.raster_file = raster_file
def get_raster_file(self):
return self.raster_file
def set_pixel_size(self, pixel_size):
self.pixel_size = pixel_size
def get_pixel_size(self):
return self.pixel_size
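# A minimal sketch (toy polygon, 1-degree pixels): the same
# features.rasterize() call that VectorEnvironmentalLayer.rasterize makes,
# applied to a synthetic square instead of a loaded shapefile.
def _rasterize_demo():
    from shapely.geometry import box
    transform = Affine.translation(-180, 90) * Affine.scale(1.0, -1.0)
    burned = features.rasterize([(box(0, 0, 10, 10), 1)],
                                out_shape=(180, 360),
                                transform=transform,
                                fill=0,
                                default_value=1)
    return burned.sum()  # number of burned pixels (100)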
class ClimateLayer(RasterEnvironmentalLayer):
pass
class DEMLayer(RasterEnvironmentalLayer):
pass
|
{
"content_hash": "f57dd737f998b8b8cf39c2877d43c246",
"timestamp": "",
"source": "github",
"line_count": 990,
"max_line_length": 152,
"avg_line_length": 51.26262626262626,
"alnum_prop": 0.6180689655172413,
"repo_name": "remenska/iSDM",
"id": "cd29aec40456b3cc0cd20508b5da8509b7666e94",
"size": "50751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iSDM/environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "17805444"
},
{
"name": "Python",
"bytes": "177224"
},
{
"name": "R",
"bytes": "7330"
}
],
"symlink_target": ""
}
|
from sympy.core.sympify import _sympify, sympify
from sympy.core.basic import Basic
from sympy.core.singleton import Singleton, S
from sympy.core.evalf import EvalfMixin
from sympy.core.numbers import Float
from sympy.core.compatibility import iterable
from sympy.core.decorators import deprecated
from sympy.mpmath import mpi, mpf
from sympy.assumptions import ask
from sympy.logic.boolalg import And, Or
from sympy.utilities import default_sort_key
class Set(Basic):
"""
The base class for any kind of set.
This is not meant to be used directly as a container of items.
It does not behave like the builtin set; see FiniteSet for that.
Real intervals are represented by the Interval class and unions of sets
by the Union class. The empty set is represented by the EmptySet class
and available as a singleton as S.EmptySet.
"""
is_number = False
is_iterable = False
is_interval = False
is_FiniteSet = False
is_Interval = False
is_ProductSet = False
is_Union = False
is_Intersection = None
is_EmptySet = None
is_UniversalSet = None
def sort_key(self, order=None):
"""
Give sort_key of infimum (if possible) else sort_key of the set.
"""
try:
infimum = self.inf
if infimum.is_comparable:
return default_sort_key(infimum, order)
except (NotImplementedError, ValueError):
pass
args = tuple([default_sort_key(a, order) for a in self._sorted_args])
return self.class_key(), (len(args), args), S.One.class_key(), S.One
def union(self, other):
"""
Returns the union of 'self' and 'other'.
As a shortcut it is possible to use the '+' operator:
>>> from sympy import Interval, FiniteSet
>>> Interval(0, 1).union(Interval(2, 3))
[0, 1] U [2, 3]
>>> Interval(0, 1) + Interval(2, 3)
[0, 1] U [2, 3]
>>> Interval(1, 2, True, True) + FiniteSet(2, 3)
(1, 2] U {3}
Similarly it is possible to use the '-' operator for set differences:
>>> Interval(0, 2) - Interval(0, 1)
(1, 2]
>>> Interval(1, 3) - FiniteSet(2)
[1, 2) U (2, 3]
"""
return Union(self, other)
def intersect(self, other):
"""
Returns the intersection of 'self' and 'other'.
>>> from sympy import Interval
>>> Interval(1, 3).intersect(Interval(1, 2))
[1, 2]
"""
return Intersection(self, other)
def _intersect(self, other):
"""
This function should only be used internally
self._intersect(other) returns a new, intersected set if self knows how
to intersect itself with other, otherwise it returns None
When making a new set class you can be assured that other will not
be a Union, FiniteSet, or EmptySet
Used within the Intersection class
"""
return None
def _union(self, other):
"""
This function should only be used internally
self._union(other) returns a new, joined set if self knows how
to join itself with other, otherwise it returns None.
It may also return a python set of SymPy Sets if they are somehow
        simpler. If it does this it must be idempotent, i.e. the sets returned
        must return None when _union'ed with each other
Used within the Union class
"""
return None
@property
def complement(self):
"""
The complement of 'self'.
As a shortcut it is possible to use the '~' or '-' operators:
>>> from sympy import Interval
>>> Interval(0, 1).complement
(-oo, 0) U (1, oo)
>>> ~Interval(0, 1)
(-oo, 0) U (1, oo)
>>> -Interval(0, 1)
(-oo, 0) U (1, oo)
"""
return self._complement
@property
def _complement(self):
raise NotImplementedError("(%s)._complement" % self)
@property
def inf(self):
"""
The infimum of 'self'
>>> from sympy import Interval, Union
>>> Interval(0, 1).inf
0
>>> Union(Interval(0, 1), Interval(2, 3)).inf
0
"""
return self._inf
@property
def _inf(self):
raise NotImplementedError("(%s)._inf" % self)
@property
def sup(self):
"""
The supremum of 'self'
>>> from sympy import Interval, Union
>>> Interval(0, 1).sup
1
>>> Union(Interval(0, 1), Interval(2, 3)).sup
3
"""
return self._sup
@property
def _sup(self):
raise NotImplementedError("(%s)._sup" % self)
def contains(self, other):
"""
Returns True if 'other' is contained in 'self' as an element.
As a shortcut it is possible to use the 'in' operator:
>>> from sympy import Interval
>>> Interval(0, 1).contains(0.5)
True
>>> 0.5 in Interval(0, 1)
True
"""
return self._contains(sympify(other, strict=True))
def _contains(self, other):
raise NotImplementedError("(%s)._contains(%s)" % (self, other))
def subset(self, other):
"""
Returns True if 'other' is a subset of 'self'.
>>> from sympy import Interval
        >>> Interval(0, 1).subset(Interval(0, 0.5))
        True
        >>> Interval(0, 1, left_open=True).subset(Interval(0, 1))
        False
"""
if isinstance(other, Set):
return self.intersect(other) == other
else:
raise ValueError("Unknown argument '%s'" % other)
@property
def measure(self):
"""
The (Lebesgue) measure of 'self'
>>> from sympy import Interval, Union
>>> Interval(0, 1).measure
1
>>> Union(Interval(0, 1), Interval(2, 3)).measure
2
"""
return self._measure
@property
def _measure(self):
raise NotImplementedError("(%s)._measure" % self)
def __add__(self, other):
return self.union(other)
def __or__(self, other):
return self.union(other)
def __and__(self, other):
return self.intersect(other)
def __mul__(self, other):
return ProductSet(self, other)
def __pow__(self, exp):
        exp = sympify(exp)
        if not (exp.is_Integer and exp >= 0):
            raise ValueError("%s: Exponent must be a non-negative Integer" % exp)
        return ProductSet([self]*exp)
def __sub__(self, other):
return self.intersect(other.complement)
def __neg__(self):
return self.complement
def __invert__(self):
return self.complement
def __contains__(self, other):
symb = self.contains(other)
result = ask(symb)
if result is None:
raise TypeError('contains did not evaluate to a bool: %r' % symb)
return result
@property
def is_real(self):
return None
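def _set_operator_demo():
    # A minimal sketch (hypothetical helper, not part of sympy): the
    # operator shortcuts defined on Set above.
    i, j = Interval(0, 2), Interval(1, 3)
    return (i + j,  # [0, 3]  (union)
            i & j,  # [1, 2]  (intersection)
            i - j)  # [0, 1)  (difference)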
class ProductSet(Set):
"""
Represents a Cartesian Product of Sets.
Returns a Cartesian product given several sets as either an iterable
or individual arguments.
Can use '*' operator on any sets for convenient shorthand.
Examples
========
>>> from sympy import Interval, FiniteSet, ProductSet
>>> I = Interval(0, 5); S = FiniteSet(1, 2, 3)
>>> ProductSet(I, S)
[0, 5] x {1, 2, 3}
>>> (2, 2) in ProductSet(I, S)
True
>>> Interval(0, 1) * Interval(0, 1) # The unit square
[0, 1] x [0, 1]
>>> coin = FiniteSet('H', 'T')
>>> set(coin**2)
set([(H, H), (H, T), (T, H), (T, T)])
Notes
=====
- Passes most operations down to the argument sets
- Flattens Products of ProductSets
References
==========
http://en.wikipedia.org/wiki/Cartesian_product
"""
is_ProductSet = True
def __new__(cls, *sets, **assumptions):
def flatten(arg):
if isinstance(arg, Set):
if arg.is_ProductSet:
return sum(map(flatten, arg.args), [])
else:
return [arg]
elif iterable(arg):
return sum(map(flatten, arg), [])
raise TypeError("Input must be Sets or iterables of Sets")
sets = flatten(list(sets))
if EmptySet() in sets or len(sets)==0:
return EmptySet()
return Basic.__new__(cls, *sets, **assumptions)
def _contains(self, element):
"""
'in' operator for ProductSets
>>> from sympy import Interval
>>> (2, 3) in Interval(0, 5) * Interval(0, 5)
True
>>> (10, 10) in Interval(0, 5) * Interval(0, 5)
False
Passes operation on to constituent sets
"""
try:
if len(element) != len(self.args):
return False
except TypeError: # maybe element isn't an iterable
return False
return And(*[set.contains(item) for set, item in zip(self.sets, element)])
def _intersect(self, other):
"""
This function should only be used internally
See Set._intersect for docstring
"""
if not other.is_ProductSet:
return None
if len(other.args) != len(self.args):
return S.EmptySet
return ProductSet(a.intersect(b)
for a, b in zip(self.sets, other.sets))
@property
def sets(self):
return self.args
@property
def _complement(self):
        # For each set consider it or its complement
# We need at least one of the sets to be complemented
# Consider all 2^n combinations.
# We can conveniently represent these options easily using a ProductSet
switch_sets = ProductSet(FiniteSet(s, s.complement) for s in self.sets)
product_sets = (ProductSet(*set) for set in switch_sets)
# Union of all combinations but this one
return Union(p for p in product_sets if p != self)
@property
def is_real(self):
return all(set.is_real for set in self.sets)
@property
def is_iterable(self):
return all(set.is_iterable for set in self.sets)
def __iter__(self):
if self.is_iterable:
from sympy.core.compatibility import product
return product(*self.sets)
else:
raise TypeError("Not all constituent sets are iterable")
@property
def _measure(self):
measure = 1
for set in self.sets:
measure *= set.measure
return measure
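def _product_set_demo():
    # A minimal sketch (hypothetical helper): a finite Cartesian square is
    # iterable, as the ProductSet docstring above describes.
    coin = FiniteSet('H', 'T')
    return set(coin * coin)  # set([(H, H), (H, T), (T, H), (T, T)])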
class Interval(Set, EvalfMixin):
"""
Represents a real interval as a Set.
Usage:
Returns an interval with end points "start" and "end".
For left_open=True (default left_open is False) the interval
will be open on the left. Similarly, for right_open=True the interval
will be open on the right.
Examples
========
    >>> from sympy import Symbol, Interval
>>> Interval(0, 1)
[0, 1]
>>> Interval(0, 1, False, True)
[0, 1)
>>> a = Symbol('a', real=True)
>>> Interval(0, a)
[0, a]
Notes
=====
- Only real end points are supported
- Interval(a, b) with a > b will return the empty set
- Use the evalf() method to turn an Interval into an mpmath
'mpi' interval instance
References
==========
<http://en.wikipedia.org/wiki/Interval_(mathematics)>
"""
is_Interval = True
is_real = True
def __new__(cls, start, end, left_open=False, right_open=False):
start = _sympify(start)
end = _sympify(end)
# Only allow real intervals (use symbols with 'is_real=True').
if not start.is_real or not end.is_real:
raise ValueError("Only real intervals are supported")
# Make sure that the created interval will be valid.
if end.is_comparable and start.is_comparable:
if end < start:
return S.EmptySet
if end == start and (left_open or right_open):
return S.EmptySet
if end == start and not (left_open or right_open):
return FiniteSet(end)
# Make sure infinite interval end points are open.
if start == S.NegativeInfinity:
left_open = True
if end == S.Infinity:
right_open = True
return Basic.__new__(cls, start, end, left_open, right_open)
@property
def start(self):
"""
The left end point of 'self'.
This property takes the same value as the 'inf' property.
>>> from sympy import Interval
>>> Interval(0, 1).start
0
"""
return self._args[0]
_inf = left = start
@property
def end(self):
"""
The right end point of 'self'.
This property takes the same value as the 'sup' property.
>>> from sympy import Interval
>>> Interval(0, 1).end
1
"""
return self._args[1]
_sup = right = end
@property
def left_open(self):
"""
True if 'self' is left-open.
>>> from sympy import Interval
>>> Interval(0, 1, left_open=True).left_open
True
>>> Interval(0, 1, left_open=False).left_open
False
"""
return self._args[2]
@property
def right_open(self):
"""
True if 'self' is right-open.
>>> from sympy import Interval
>>> Interval(0, 1, right_open=True).right_open
True
>>> Interval(0, 1, right_open=False).right_open
False
"""
return self._args[3]
def _intersect(self, other):
"""
This function should only be used internally
See Set._intersect for docstring
"""
# We only know how to intersect with other intervals
if not other.is_Interval:
return None
# We can't intersect [0,3] with [x,6] -- we don't know if x>0 or x<0
if not self._is_comparable(other):
return None
empty = False
if self.start <= other.end and other.start <= self.end:
# Get topology right.
if self.start < other.start:
start = other.start
left_open = other.left_open
elif self.start > other.start:
start = self.start
left_open = self.left_open
else:
start = self.start
left_open = self.left_open or other.left_open
if self.end < other.end:
end = self.end
right_open = self.right_open
elif self.end > other.end:
end = other.end
right_open = other.right_open
else:
end = self.end
right_open = self.right_open or other.right_open
if end - start == 0 and (left_open or right_open):
empty = True
else:
empty = True
if empty:
return S.EmptySet
return Interval(start, end, left_open, right_open)
def _union(self, other):
"""
This function should only be used internally
See Set._union for docstring
"""
if other.is_Interval and self._is_comparable(other):
from sympy.functions.elementary.miscellaneous import Min, Max
# Non-overlapping intervals
end = Min(self.end, other.end)
start = Max(self.start, other.start)
if (end < start or
(end==start and (end not in self and end not in other))):
return None
else:
start = Min(self.start, other.start)
end = Max(self.end, other.end)
left_open = ((self.start != start or self.left_open) and
(other.start != start or other.left_open))
right_open = ((self.end != end or self.right_open) and
(other.end != end or other.right_open))
return Interval(start, end, left_open, right_open)
# If I have open end points and these endpoints are contained in other
if ((self.left_open and other.contains(self.start) is True) or
(self.right_open and other.contains(self.end) is True)):
# Fill in my end points and return
open_left = self.left_open and self.start not in other
open_right = self.right_open and self.end not in other
new_self = Interval(self.start, self.end, open_left, open_right)
return set((new_self, other))
return None
@property
def _complement(self):
a = Interval(S.NegativeInfinity, self.start, True, not self.left_open)
b = Interval(self.end, S.Infinity, not self.right_open, True)
return Union(a, b)
def _contains(self, other):
if self.left_open:
expr = other > self.start
else:
expr = other >= self.start
if self.right_open:
expr = And(expr, other < self.end)
else:
expr = And(expr, other <= self.end)
return expr
@property
def _measure(self):
return self.end - self.start
def to_mpi(self, prec=53):
return mpi(mpf(self.start.evalf(prec)), mpf(self.end.evalf(prec)))
def _eval_evalf(self, prec):
return Interval(self.left.evalf(), self.right.evalf(),
left_open=self.left_open, right_open=self.right_open)
def _is_comparable(self, other):
is_comparable = self.start.is_comparable
is_comparable &= self.end.is_comparable
is_comparable &= other.start.is_comparable
is_comparable &= other.end.is_comparable
return is_comparable
@property
def is_left_unbounded(self):
"""Return ``True`` if the left endpoint is negative infinity. """
return self.left is S.NegativeInfinity or self.left == Float("-inf")
@property
def is_right_unbounded(self):
"""Return ``True`` if the right endpoint is positive infinity. """
return self.right is S.Infinity or self.right == Float("+inf")
def as_relational(self, symbol):
"""Rewrite an interval in terms of inequalities and logic operators. """
from sympy.core.relational import Lt, Le
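        # For example, Interval(0, 1, False, True).as_relational(x)
        # yields And(0 <= x, x < 1).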
if not self.is_left_unbounded:
if self.left_open:
left = Lt(self.start, symbol)
else:
left = Le(self.start, symbol)
if not self.is_right_unbounded:
if self.right_open:
right = Lt(symbol, self.right)
else:
right = Le(symbol, self.right)
if self.is_left_unbounded and self.is_right_unbounded:
return True # XXX: Contained(symbol, Floats)
elif self.is_left_unbounded:
return right
elif self.is_right_unbounded:
return left
else:
return And(left, right)
class Union(Set, EvalfMixin):
"""
Represents a union of sets as a Set.
Examples
========
>>> from sympy import Union, Interval
>>> Union(Interval(1, 2), Interval(3, 4))
[1, 2] U [3, 4]
The Union constructor will always try to merge overlapping intervals,
if possible. For example:
>>> Union(Interval(1, 2), Interval(2, 3))
[1, 3]
See Also
========
Intersection
References
==========
<http://en.wikipedia.org/wiki/Union_(set_theory)>
"""
is_Union = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', True)
# flatten inputs to merge intersections and iterables
args = list(args)
def flatten(arg):
if isinstance(arg, Set):
if arg.is_Union:
return sum(map(flatten, arg.args), [])
else:
return [arg]
if iterable(arg): # and not isinstance(arg, Set) (implicit)
return sum(map(flatten, arg), [])
raise TypeError("Input must be Sets or iterables of Sets")
args = flatten(args)
# Union of no sets is EmptySet
if len(args)==0:
return S.EmptySet
args = sorted(args, key=default_sort_key)
# Reduce sets using known rules
if evaluate:
return Union.reduce(args)
return Basic.__new__(cls, *args)
@staticmethod
def reduce(args):
"""
Simplify a Union using known rules
We first start with global rules like
'Merge all FiniteSets'
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent
"""
# ===== Global Rules =====
# Merge all finite sets
finite_sets = [x for x in args if x.is_FiniteSet]
if len(finite_sets) > 1:
finite_set = FiniteSet(x for set in finite_sets for x in set)
args = [finite_set] + [x for x in args if not x.is_FiniteSet]
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
        while new_args:
for s in args:
new_args = False
for t in args - set((s,)):
new_set = s._union(t)
                    # This returns None if s does not know how to union
                    # with t. Returns the newly unioned set otherwise
if new_set is not None:
if not isinstance(new_set, set):
new_set = set((new_set, ))
new_args = (args - set((s, t))).union(new_set)
break
if new_args:
args = new_args
break
if len(args)==1:
return args.pop()
else:
return Union(args, evaluate=False)
@property
def _inf(self):
        # We use Min so that inf is meaningful in combination with symbolic
        # interval end points.
from sympy.functions.elementary.miscellaneous import Min
return Min(*[set.inf for set in self.args])
@property
def _sup(self):
# We use Max so that sup is meaningful in combination with symbolic
# end points.
from sympy.functions.elementary.miscellaneous import Max
return Max(*[set.sup for set in self.args])
@property
def _complement(self):
# De Morgan's formula.
complement = self.args[0].complement
for set in self.args[1:]:
complement = complement.intersect(set.complement)
return complement
def _contains(self, other):
or_args = [the_set.contains(other) for the_set in self.args]
return Or(*or_args)
@property
def _measure(self):
# Measure of a union is the sum of the measures of the sets minus
# the sum of their pairwise intersections plus the sum of their
# triple-wise intersections minus ... etc...
        # Sets is a collection of intersections and a set of elementary
        # sets which make up those intersections (called "sos" for set of sets)
        # An example element of this list might be:
# ( {A,B,C}, A.intersect(B).intersect(C) )
# Start with just elementary sets ( ({A}, A), ({B}, B), ... )
# Then get and subtract ( ({A,B}, (A int B), ... ) while non-zero
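        # e.g. for [0, 2] U [1, 3]: measure = 2 + 2 - 1 = 3, the two
        # interval lengths minus the length of their overlap [1, 2].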
sets = [(FiniteSet(s), s) for s in self.args]
measure = 0
parity = 1
while sets:
# Add up the measure of these sets and add or subtract it to total
measure += parity * sum(inter.measure for sos, inter in sets)
# For each intersection in sets, compute the intersection with every
# other set not already part of the intersection.
sets = ((sos + FiniteSet(newset), newset.intersect(intersection))
for sos, intersection in sets for newset in self.args
if newset not in sos)
# Clear out sets with no measure
sets = [(sos, inter) for sos, inter in sets if inter.measure != 0]
# Clear out duplicates
sos_list = []
sets_list = []
for set in sets:
if set[0] in sos_list:
continue
else:
sos_list.append(set[0])
sets_list.append(set)
sets = sets_list
# Flip Parity - next time subtract/add if we added/subtracted here
parity *= -1
return measure
def as_relational(self, symbol):
"""Rewrite a Union in terms of equalities and logic operators. """
return Or(*[set.as_relational(symbol) for set in self.args])
@property
def is_iterable(self):
return all(arg.is_iterable for arg in self.args)
def _eval_evalf(self, prec):
try:
return Union(set.evalf() for set in self.args)
        except Exception:
raise TypeError("Not all sets are evalf-able")
def __iter__(self):
import itertools
if all(set.is_iterable for set in self.args):
return itertools.chain(*(iter(arg) for arg in self.args))
else:
raise TypeError("Not all constituent sets are iterable")
@property
def is_real(self):
return all(set.is_real for set in self.args)
class Intersection(Set):
"""
Represents an intersection of sets as a Set.
Examples
========
>>> from sympy import Intersection, Interval
>>> Intersection(Interval(1, 3), Interval(2, 4))
[2, 3]
We often use the .intersect method
>>> Interval(1,3).intersect(Interval(2,4))
[2, 3]
See Also
========
Union
References
==========
<http://en.wikipedia.org/wiki/Intersection_(set_theory)>
"""
is_Intersection = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', True)
# flatten inputs to merge intersections and iterables
args = list(args)
def flatten(arg):
if isinstance(arg, Set):
if arg.is_Intersection:
return sum(map(flatten, arg.args), [])
else:
return [arg]
if iterable(arg): # and not isinstance(arg, Set) (implicit)
return sum(map(flatten, arg), [])
raise TypeError("Input must be Sets or iterables of Sets")
args = flatten(args)
# Intersection of no sets is everything
if len(args)==0:
return S.UniversalSet
args = sorted(args, key=default_sort_key)
# Reduce sets using known rules
if evaluate:
return Intersection.reduce(args)
return Basic.__new__(cls, *args)
@property
def is_iterable(self):
return any(arg.is_iterable for arg in self.args)
@property
def _inf(self):
raise NotImplementedError()
@property
def _sup(self):
raise NotImplementedError()
@property
def _complement(self):
raise NotImplementedError()
def _contains(self, other):
from sympy.logic.boolalg import And
return And(*[set.contains(other) for set in self.args])
def __iter__(self):
for s in self.args:
if s.is_iterable:
other_sets = set(self.args) - set((s,))
other = Intersection(other_sets, evaluate=False)
return (x for x in s if x in other)
raise ValueError("None of the constituent sets are iterable")
@staticmethod
def reduce(args):
"""
Simplify an intersection using known rules
We first start with global rules like
'if any empty sets return empty set' and 'distribute any unions'
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent
"""
# ===== Global Rules =====
# If any EmptySets return EmptySet
if any(s.is_EmptySet for s in args):
return S.EmptySet
# If any FiniteSets see which elements of that finite set occur within
# all other sets in the intersection
for s in args:
if s.is_FiniteSet:
return s.__class__(x for x in s
if all(x in other for other in args))
# If any of the sets are unions, return a Union of Intersections
for s in args:
if s.is_Union:
other_sets = set(args) - set((s,))
other = Intersection(other_sets)
return Union(Intersection(arg, other) for arg in s.args)
# At this stage we are guaranteed not to have any
# EmptySets, FiniteSets, or Unions in the intersection
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
        while new_args:
for s in args:
new_args = False
for t in args - set((s,)):
new_set = s._intersect(t)
# This returns None if s does not know how to intersect
# with t. Returns the newly intersected set otherwise
if new_set is not None:
new_args = (args - set((s, t))).union(set((new_set, )))
break
if new_args:
args = new_args
break
if len(args)==1:
return args.pop()
else:
return Intersection(args, evaluate=False)
def as_relational(self, symbol):
"""Rewrite an Intersection in terms of equalities and logic operators"""
return And(*[set.as_relational(symbol) for set in self.args])
class EmptySet(Set):
"""
Represents the empty set. The empty set is available as a singleton
as S.EmptySet.
Examples
========
>>> from sympy import S, Interval
>>> S.EmptySet
EmptySet()
>>> Interval(1, 2).intersect(S.EmptySet)
EmptySet()
See Also
========
UniversalSet
References
==========
http://en.wikipedia.org/wiki/Empty_set
"""
__metaclass__ = Singleton
is_EmptySet = True
def _intersect(self, other):
return S.EmptySet
@property
def _complement(self):
return S.UniversalSet
@property
def _measure(self):
return 0
def _contains(self, other):
return False
def as_relational(self, symbol):
return False
def __len__(self):
return 0
def _union(self, other):
return other
def __iter__(self):
return iter([])
class UniversalSet(Set):
"""
Represents the set of all things.
The universal set is available as a singleton as S.UniversalSet
Examples
========
>>> from sympy import S, Interval
>>> S.UniversalSet
UniversalSet()
>>> Interval(1, 2).intersect(S.UniversalSet)
[1, 2]
See Also
========
EmptySet
References
==========
http://en.wikipedia.org/wiki/Universal_set
"""
__metaclass__ = Singleton
is_UniversalSet = True
def _intersect(self, other):
return other
@property
def _complement(self):
return S.EmptySet
@property
def _measure(self):
return S.Infinity
def _contains(self, other):
return True
def as_relational(self, symbol):
return True
def _union(self, other):
return self
class FiniteSet(Set, EvalfMixin):
"""
Represents a finite set of discrete numbers
Examples
========
>>> from sympy import Symbol, FiniteSet, sets
>>> FiniteSet(1, 2, 3, 4)
{1, 2, 3, 4}
>>> 3 in FiniteSet(1, 2, 3, 4)
True
References
==========
http://en.wikipedia.org/wiki/Finite_set
"""
is_FiniteSet = True
is_iterable = True
def __new__(cls, *args):
if len(args)==1 and iterable(args[0]):
args = args[0]
args = map(sympify, args)
if len(args) == 0:
return EmptySet()
args = frozenset(args) # remove duplicates
obj = Basic.__new__(cls, *args)
obj._elements = args
return obj
def __iter__(self):
return iter(self.args)
def _intersect(self, other):
"""
This function should only be used internally
See Set._intersect for docstring
"""
if isinstance(other, self.__class__):
return self.__class__(*(self._elements & other._elements))
return self.__class__(el for el in self if el in other)
def _union(self, other):
"""
This function should only be used internally
See Set._union for docstring
"""
if other.is_FiniteSet:
return FiniteSet(*(self._elements | other._elements))
# If other set contains one of my elements, remove it from myself
if any(other.contains(x) is True for x in self):
return set((
FiniteSet(x for x in self if other.contains(x) is not True),
other))
return None
def _contains(self, other):
"""
Tests whether an element, other, is in the set.
        Relies on Python's set class. This tests for object equality.
        All inputs are sympified.
>>> from sympy import FiniteSet
>>> 1 in FiniteSet(1, 2)
True
>>> 5 in FiniteSet(1, 2)
False
"""
return other in self._elements
@property
def _complement(self):
"""
The complement of a real finite set is the Union of open Intervals
between the elements of the set.
>>> from sympy import FiniteSet
>>> FiniteSet(1, 2, 3).complement
(-oo, 1) U (1, 2) U (2, 3) U (3, oo)
"""
if not all(elem.is_number for elem in self):
raise ValueError("%s: Complement not defined for symbolic inputs"
%self)
# as there are only numbers involved, a straight sort is sufficient;
# default_sort_key is not needed
args = sorted(self.args)
intervals = [] # Build up a list of intervals between the elements
intervals += [Interval(S.NegativeInfinity, args[0], True, True)]
for a, b in zip(args[:-1], args[1:]):
intervals.append(Interval(a, b, True, True)) # open intervals
intervals.append(Interval(args[-1], S.Infinity, True, True))
return Union(intervals, evaluate=False)
@property
def _inf(self):
from sympy.functions.elementary.miscellaneous import Min
return Min(*self)
@property
def _sup(self):
from sympy.functions.elementary.miscellaneous import Max
return Max(*self)
@property
def measure(self):
return 0
def __len__(self):
return len(self.args)
def __sub__(self, other):
return FiniteSet(el for el in self if el not in other)
def as_relational(self, symbol):
"""Rewrite a FiniteSet in terms of equalities and logic operators. """
from sympy.core.relational import Eq
return Or(*[Eq(symbol, elem) for elem in self])
@property
def is_real(self):
return all(el.is_real for el in self)
def compare(self, other):
return (hash(self) - hash(other))
def _eval_evalf(self, prec):
return FiniteSet(elem.evalf(prec) for elem in self)
def _hashable_content(self):
return (self._elements,)
@property
def _sorted_args(self):
from sympy.utilities import default_sort_key
return sorted(self.args, key=default_sort_key)
|
{
"content_hash": "23860d2e6414ec3e43be45a40236ab1e",
"timestamp": "",
"source": "github",
"line_count": 1273,
"max_line_length": 82,
"avg_line_length": 28.407698350353495,
"alnum_prop": 0.55554019301496,
"repo_name": "srjoglekar246/sympy",
"id": "905af67dcedb506a9781495f1c8ec154f70378ed",
"size": "36163",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sympy/core/sets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10283965"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "TeX",
"bytes": "8789"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
"""Discovers Chromecasts on the network using mDNS/zeroconf."""
from uuid import UUID
import six
from zeroconf import ServiceBrowser, Zeroconf
DISCOVER_TIMEOUT = 5
class CastListener(object):
"""Zeroconf Cast Services collection."""
def __init__(self, callback=None):
self.services = {}
self.callback = callback
@property
def count(self):
"""Number of discovered cast services."""
return len(self.services)
@property
def devices(self):
"""List of tuples (ip, host) for each discovered device."""
return list(self.services.values())
# pylint: disable=unused-argument
def remove_service(self, zconf, typ, name):
""" Remove a service from the collection. """
self.services.pop(name, None)
def add_service(self, zconf, typ, name):
""" Add a service to the collection. """
service = None
tries = 0
while service is None and tries < 4:
try:
service = zconf.get_service_info(typ, name)
except IOError:
                # If zeroconf fails to receive the necessary data, we abort
# adding the service
break
tries += 1
if not service:
return
def get_value(key):
"""Retrieve value and decode for Python 2/3."""
value = service.properties.get(key.encode('utf-8'))
if value is None or isinstance(value, six.text_type):
return value
return value.decode('utf-8')
ips = zconf.cache.entries_with_name(service.server.lower())
host = repr(ips[0]) if ips else service.server
model_name = get_value('md')
uuid = get_value('id')
friendly_name = get_value('fn')
if uuid:
uuid = UUID(uuid)
self.services[name] = (host, service.port, uuid, model_name,
friendly_name)
if self.callback:
self.callback(name)
def start_discovery(callback=None):
"""
Start discovering chromecasts on the network.
This method will start discovering chromecasts on a separate thread. When
a chromecast is discovered, the callback will be called with the
discovered chromecast's zeroconf name. This is the dictionary key to find
the chromecast metadata in listener.services.
This method returns the CastListener object and the zeroconf ServiceBrowser
object. The CastListener object will contain information for the discovered
chromecasts. To stop discovery, call the stop_discovery method with the
ServiceBrowser object.
"""
listener = CastListener(callback)
return listener, \
ServiceBrowser(Zeroconf(), "_googlecast._tcp.local.", listener)
def stop_discovery(browser):
"""Stop the chromecast discovery thread."""
browser.zc.close()
def discover_chromecasts(max_devices=None, timeout=DISCOVER_TIMEOUT):
    """ Discover chromecasts on the network. """
    from threading import Event
    discover_complete = Event()
    # pylint: disable=unused-argument
    def callback(name):
        """Called when zeroconf has discovered a new chromecast."""
        if max_devices is not None and listener.count >= max_devices:
            discover_complete.set()
    # Start discovery before entering the try block so that `browser` is
    # always bound by the time the finally clause calls stop_discovery.
    listener, browser = start_discovery(callback)
    try:
        # Wait for the timeout or the maximum number of devices
        discover_complete.wait(timeout)
        return listener.devices
    finally:
        stop_discovery(browser)
|
{
"content_hash": "53c6ff73cf16b9426087e4a22f88a7c4",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 31.54385964912281,
"alnum_prop": 0.6262513904338154,
"repo_name": "piedar/pychromecast",
"id": "72b649536a2f10d686d2dd818e6b1f2a6013b171",
"size": "3596",
"binary": false,
"copies": "2",
"ref": "refs/heads/cast",
"path": "pychromecast/discovery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Protocol Buffer",
"bytes": "8579"
},
{
"name": "Python",
"bytes": "90574"
}
],
"symlink_target": ""
}
|
"""Pylint plugin for checking in Sphinx, Google, or Numpy style docstrings
"""
from __future__ import print_function, division, absolute_import
import astroid
from pylint.interfaces import IAstroidChecker
from pylint.checkers import BaseChecker
from pylint.checkers import utils as checker_utils
import pylint.extensions._check_docs_utils as utils
class DocstringParameterChecker(BaseChecker):
"""Checker for Sphinx, Google, or Numpy style docstrings
* Check that all function, method and constructor parameters are mentioned
in the params and types part of the docstring. Constructor parameters
can be documented in either the class docstring or ``__init__`` docstring,
but not both.
* Check that there are no naming inconsistencies between the signature and
the documentation, i.e. also report documented parameters that are missing
in the signature. This is important to find cases where parameters are
renamed only in the code, not in the documentation.
* Check that all explicitly raised exceptions in a function are documented
in the function docstring. Caught exceptions are ignored.
Activate this checker by adding the line::
load-plugins=pylint.extensions.docparams
to the ``MASTER`` section of your ``.pylintrc``.
:param linter: linter object
:type linter: :class:`pylint.lint.PyLinter`
"""
__implements__ = IAstroidChecker
name = 'parameter_documentation'
msgs = {
'W9005': ('"%s" has constructor parameters documented in class and __init__',
'multiple-constructor-doc',
'Please remove parameter declarations in the class or constructor.'),
'W9006': ('"%s" not documented as being raised',
'missing-raises-doc',
'Please document exceptions for all raised exception types.'),
'W9008': ('Redundant returns documentation',
'redundant-returns-doc',
'Please remove the return/rtype documentation from this method.'),
'W9010': ('Redundant yields documentation',
'redundant-yields-doc',
'Please remove the yields documentation from this method.'),
'W9011': ('Missing return documentation',
'missing-return-doc',
'Please add documentation about what this method returns.',
{'old_names': [('W9007', 'missing-returns-doc')]}),
'W9012': ('Missing return type documentation',
'missing-return-type-doc',
'Please document the type returned by this method.',
# we can't use the same old_name for two different warnings
# {'old_names': [('W9007', 'missing-returns-doc')]},
),
'W9013': ('Missing yield documentation',
'missing-yield-doc',
'Please add documentation about what this generator yields.',
{'old_names': [('W9009', 'missing-yields-doc')]}),
'W9014': ('Missing yield type documentation',
'missing-yield-type-doc',
'Please document the type yielded by this method.',
# we can't use the same old_name for two different warnings
# {'old_names': [('W9009', 'missing-yields-doc')]},
),
'W9015': ('"%s" missing in parameter documentation',
'missing-param-doc',
'Please add parameter declarations for all parameters.',
{'old_names': [('W9003', 'missing-param-doc')]}),
'W9016': ('"%s" missing in parameter type documentation',
'missing-type-doc',
'Please add parameter type declarations for all parameters.',
{'old_names': [('W9004', 'missing-type-doc')]}),
'W9017': ('"%s" differing in parameter documentation',
'differing-param-doc',
'Please check parameter names in declarations.',
),
'W9018': ('"%s" differing in parameter type documentation',
'differing-type-doc',
'Please check parameter names in type declarations.',
),
}
options = (('accept-no-param-doc',
{'default': True, 'type' : 'yn', 'metavar' : '<y or n>',
'help': 'Whether to accept totally missing parameter '
'documentation in the docstring of a function that has '
'parameters.'
}),
('accept-no-raise-doc',
{'default': True, 'type' : 'yn', 'metavar' : '<y or n>',
'help': 'Whether to accept totally missing raises '
'documentation in the docstring of a function that '
'raises an exception.'
}),
('accept-no-return-doc',
{'default': True, 'type' : 'yn', 'metavar' : '<y or n>',
'help': 'Whether to accept totally missing return '
'documentation in the docstring of a function that '
                         'returns a value.'
}),
('accept-no-yields-doc',
{'default': True, 'type' : 'yn', 'metavar': '<y or n>',
'help': 'Whether to accept totally missing yields '
'documentation in the docstring of a generator.'
}),
)
priority = -2
constructor_names = {'__init__', '__new__'}
not_needed_param_in_docstring = {'self', 'cls'}
def visit_functiondef(self, node):
"""Called for function and method definitions (def).
:param node: Node for a function or method definition in the AST
:type node: :class:`astroid.scoped_nodes.Function`
"""
node_doc = utils.docstringify(node.doc)
self.check_functiondef_params(node, node_doc)
self.check_functiondef_returns(node, node_doc)
self.check_functiondef_yields(node, node_doc)
def check_functiondef_params(self, node, node_doc):
node_allow_no_param = None
if node.name in self.constructor_names:
class_node = checker_utils.node_frame_class(node)
if class_node is not None:
class_doc = utils.docstringify(class_node.doc)
self.check_single_constructor_params(class_doc, node_doc, class_node)
# __init__ or class docstrings can have no parameters documented
# as long as the other documents them.
node_allow_no_param = (
class_doc.has_params() or
class_doc.params_documented_elsewhere() or
None
)
class_allow_no_param = (
node_doc.has_params() or
node_doc.params_documented_elsewhere() or
None
)
self.check_arguments_in_docstring(
class_doc, node.args, class_node, class_allow_no_param)
self.check_arguments_in_docstring(
node_doc, node.args, node, node_allow_no_param)
def check_functiondef_returns(self, node, node_doc):
if not node_doc.supports_yields and node.is_generator():
return
return_nodes = node.nodes_of_class(astroid.Return)
if ((node_doc.has_returns() or node_doc.has_rtype()) and
not any(utils.returns_something(ret_node) for ret_node in return_nodes)):
self.add_message(
'redundant-returns-doc',
node=node)
def check_functiondef_yields(self, node, node_doc):
if not node_doc.supports_yields:
return
if ((node_doc.has_yields() or node_doc.has_yields_type()) and
not node.is_generator()):
self.add_message(
'redundant-yields-doc',
node=node)
def visit_raise(self, node):
func_node = node.frame()
if not isinstance(func_node, astroid.FunctionDef):
return
expected_excs = utils.possible_exc_types(node)
if not expected_excs:
return
if not func_node.doc:
# If this is a property setter,
# the property should have the docstring instead.
property_ = utils.get_setters_property(func_node)
if property_:
func_node = property_
doc = utils.docstringify(func_node.doc)
if not doc.is_valid():
if doc.doc:
self._handle_no_raise_doc(expected_excs, func_node)
return
found_excs = doc.exceptions()
missing_excs = expected_excs - found_excs
self._add_raise_message(missing_excs, func_node)
def visit_return(self, node):
if not utils.returns_something(node):
return
func_node = node.frame()
if not isinstance(func_node, astroid.FunctionDef):
return
doc = utils.docstringify(func_node.doc)
if not doc.is_valid() and self.config.accept_no_return_doc:
return
is_property = checker_utils.decorated_with_property(func_node)
if not (doc.has_returns() or
(doc.has_property_returns() and is_property)):
self.add_message(
'missing-return-doc',
node=func_node
)
if not (doc.has_rtype() or
(doc.has_property_type() and is_property)):
self.add_message(
'missing-return-type-doc',
node=func_node
)
def visit_yield(self, node):
func_node = node.frame()
if not isinstance(func_node, astroid.FunctionDef):
return
doc = utils.docstringify(func_node.doc)
if not doc.is_valid() and self.config.accept_no_yields_doc:
return
if doc.supports_yields:
doc_has_yields = doc.has_yields()
doc_has_yields_type = doc.has_yields_type()
else:
doc_has_yields = doc.has_returns()
doc_has_yields_type = doc.has_rtype()
if not doc_has_yields:
self.add_message(
'missing-yield-doc',
node=func_node
)
if not doc_has_yields_type:
self.add_message(
'missing-yield-type-doc',
node=func_node
)
def visit_yieldfrom(self, node):
self.visit_yield(node)
def check_arguments_in_docstring(self, doc, arguments_node, warning_node,
accept_no_param_doc=None):
"""Check that all parameters in a function, method or class constructor
on the one hand and the parameters mentioned in the parameter
documentation (e.g. the Sphinx tags 'param' and 'type') on the other
hand are consistent with each other.
* Undocumented parameters except 'self' are noticed.
* Undocumented parameter types except for 'self' and the ``*<args>``
and ``**<kwargs>`` parameters are noticed.
* Parameters mentioned in the parameter documentation that don't or no
longer exist in the function parameter list are noticed.
* If the text "For the parameters, see" or "For the other parameters,
see" (ignoring additional whitespace) is mentioned in the docstring,
missing parameter documentation is tolerated.
* If there's no Sphinx style, Google style or NumPy style parameter
documentation at all, i.e. ``:param`` is never mentioned etc., the
checker assumes that the parameters are documented in another format
and the absence is tolerated.
:param doc: Docstring for the function, method or class.
:type doc: str
:param arguments_node: Arguments node for the function, method or
class constructor.
:type arguments_node: :class:`astroid.scoped_nodes.Arguments`
:param warning_node: The node to assign the warnings to
:type warning_node: :class:`astroid.scoped_nodes.Node`
:param accept_no_param_doc: Whether or not to allow no parameters
to be documented.
If None then this value is read from the configuration.
:type accept_no_param_doc: bool or None
"""
# Tolerate missing param or type declarations if there is a link to
# another method carrying the same name.
if not doc.doc:
return
if accept_no_param_doc is None:
accept_no_param_doc = self.config.accept_no_param_doc
tolerate_missing_params = doc.params_documented_elsewhere()
# Collect the function arguments.
expected_argument_names = set(arg.name for arg in arguments_node.args)
expected_argument_names.update(arg.name for arg in arguments_node.kwonlyargs)
not_needed_type_in_docstring = (
self.not_needed_param_in_docstring.copy())
if arguments_node.vararg is not None:
expected_argument_names.add(arguments_node.vararg)
not_needed_type_in_docstring.add(arguments_node.vararg)
if arguments_node.kwarg is not None:
expected_argument_names.add(arguments_node.kwarg)
not_needed_type_in_docstring.add(arguments_node.kwarg)
params_with_doc, params_with_type = doc.match_param_docs()
# Tolerate no parameter documentation at all.
if (not params_with_doc and not params_with_type
and accept_no_param_doc):
tolerate_missing_params = True
def _compare_missing_args(found_argument_names, message_id,
not_needed_names):
"""Compare the found argument names with the expected ones and
generate a message if there are arguments missing.
:param set found_argument_names: argument names found in the
docstring
:param str message_id: pylint message id
:param not_needed_names: names that may be omitted
:type not_needed_names: set of str
"""
if not tolerate_missing_params:
missing_argument_names = (
(expected_argument_names - found_argument_names)
- not_needed_names)
if missing_argument_names:
self.add_message(
message_id,
args=(', '.join(
sorted(missing_argument_names)),),
node=warning_node)
def _compare_different_args(found_argument_names, message_id,
not_needed_names):
"""Compare the found argument names with the expected ones and
generate a message if there are extra arguments found.
:param set found_argument_names: argument names found in the
docstring
:param str message_id: pylint message id
:param not_needed_names: names that may be omitted
:type not_needed_names: set of str
"""
differing_argument_names = (
(expected_argument_names ^ found_argument_names)
- not_needed_names - expected_argument_names)
if differing_argument_names:
self.add_message(
message_id,
args=(', '.join(
sorted(differing_argument_names)),),
node=warning_node)
_compare_missing_args(params_with_doc, 'missing-param-doc',
self.not_needed_param_in_docstring)
_compare_missing_args(params_with_type, 'missing-type-doc',
not_needed_type_in_docstring)
_compare_different_args(params_with_doc, 'differing-param-doc',
self.not_needed_param_in_docstring)
_compare_different_args(params_with_type, 'differing-type-doc',
not_needed_type_in_docstring)
def check_single_constructor_params(self, class_doc, init_doc, class_node):
if class_doc.has_params() and init_doc.has_params():
self.add_message(
'multiple-constructor-doc',
args=(class_node.name,),
node=class_node)
def _handle_no_raise_doc(self, excs, node):
if self.config.accept_no_raise_doc:
return
self._add_raise_message(excs, node)
def _add_raise_message(self, missing_excs, node):
"""
        Adds a message on ``node`` for the missing exception types.
        :param missing_excs: A list of missing exception types.
        :type missing_excs: list
        :param node: The node to show the message on.
:type node: astroid.node_classes.NodeNG
"""
if not missing_excs:
return
self.add_message(
'missing-raises-doc',
args=(', '.join(sorted(missing_excs)),),
node=node)
def register(linter):
"""Required method to auto register this checker.
:param linter: Main interface object for Pylint plugins
:type linter: Pylint object
"""
linter.register_checker(DocstringParameterChecker(linter))
|
{
"content_hash": "bb88b3385f6049364a48972cccb1548b",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 89,
"avg_line_length": 41.45497630331754,
"alnum_prop": 0.5727106436492512,
"repo_name": "lucidmotifs/auto-aoc",
"id": "a304683e23157277d85ae37bec49884ea0f39718",
"size": "18157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".venv/lib/python3.5/site-packages/pylint/extensions/docparams.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "74"
},
{
"name": "C",
"bytes": "41695"
},
{
"name": "C++",
"bytes": "35306"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "48431"
},
{
"name": "JavaScript",
"bytes": "2043"
},
{
"name": "Python",
"bytes": "4850280"
},
{
"name": "Shell",
"bytes": "3778"
},
{
"name": "Visual Basic",
"bytes": "820"
},
{
"name": "XSLT",
"bytes": "2058"
}
],
"symlink_target": ""
}
|
import numpy as np
import time
from commons.utils import logger # import from ../../commons
from commons import evallib # import from ../../commons
from wsdream import NMF
from scipy import stats
import multiprocessing
#======================================================#
# Function to evaluate the approach at all settings
#======================================================#
def execute(matrix, para):
# loop over each density and each round
if para['parallelMode']: # run on multiple processes
pool = multiprocessing.Pool()
for den in para['density']:
for roundId in xrange(para['rounds']):
pool.apply_async(executeOneSetting, (matrix, den, roundId, para))
pool.close()
pool.join()
    else: # run on a single process
for den in para['density']:
for roundId in xrange(para['rounds']):
executeOneSetting(matrix, den, roundId, para)
# summarize the dumped results
evallib.summarizeResult(para)
#======================================================#
# Function to run the prediction approach at one setting
#======================================================#
def executeOneSetting(matrix, density, roundId, para):
logger.info('density=%.2f, %2d-round starts.'%(density, roundId + 1))
    # split the data matrix into training and test entries
(trainMatrix, testMatrix) = evallib.removeEntries(matrix, density, roundId)
# QoS prediction
startTime = time.clock() # to record the running time for one round
predictedMatrix = NMF.predict(trainMatrix, para)
runningTime = float(time.clock() - startTime)
# evaluate the estimation error
evalResult = evallib.evaluate(testMatrix, predictedMatrix, para)
result = (evalResult, runningTime)
# dump the result at each density
outFile = '%s%s_%s_result_%.2f_round%02d.tmp'%(para['outPath'], para['dataName'],
para['dataType'], density, roundId + 1)
evallib.dumpresult(outFile, result)
logger.info('density=%.2f, %2d-round done.'%(density, roundId + 1))
logger.info('----------------------------------------------')
|
{
"content_hash": "56c98511fed356c84696ef3560b102b7",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 86,
"avg_line_length": 38.410714285714285,
"alnum_prop": 0.5811250581125058,
"repo_name": "wsdream/WS-DREAM",
"id": "ad7febf3404bcb54676994a4eb4caf34a4c18011",
"size": "2362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmarks/model-based/NMF/evaluator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7398"
},
{
"name": "C++",
"bytes": "78282"
},
{
"name": "Python",
"bytes": "131548"
}
],
"symlink_target": ""
}
|
import os, sys
usage = "usage: %s search_text replace_text [infile [outfile]]" % os.path.basename(sys.argv[0])
if len(sys.argv) < 3:
print usage
else:
stext = sys.argv[1]
rtext = sys.argv[2]
input = sys.stdin
output = sys.stdout
if len(sys.argv) > 3:
input = open(sys.argv[3])
if len(sys.argv) > 4:
output = open(sys.argv[4], 'w')
for s in input.xreadlines():
output.write(s.replace(stext, rtext))
# For older versions of Python (1.5.2 and earlier) import
# the string module and replace the last two lines with:
#
# for s in input.readlines():
# output.write(string.replace(s, stext, rtext))
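#
# Example invocation (hypothetical file names):
#   python search_replace.py foo bar infile.txt outfile.txt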
|
{
"content_hash": "70a81528d5ebb7e88c6b164cf03a1aa3",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 103,
"avg_line_length": 29.304347826086957,
"alnum_prop": 0.612759643916914,
"repo_name": "ActiveState/code",
"id": "657920cb2f716972c4808c78c241e215eaeeb7a0",
"size": "696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/52250_Search_replace_text/recipe-52250.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
# XXX : originally copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD
from __future__ import division
import inspect
import re
import warnings
import numpy as np
from scipy import linalg
###############################################################################
# Misc
# helpers to get function arguments
if hasattr(inspect, 'signature'): # py35
def _get_args(function, varargs=False):
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
else:
def _get_args(function, varargs=False):
out = inspect.getargspec(function) # args, varargs, keywords, defaults
if varargs:
return out[:2]
else:
return out[0]
def _safe_svd(A, **kwargs):
"""Wrapper to get around the SVD did not converge error of death"""
# Intel has a bug with their GESVD driver:
# https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628049 # noqa
# For SciPy 0.18 and up, we can work around it by using
# lapack_driver='gesvd' instead.
if kwargs.get('overwrite_a', False):
raise ValueError('Cannot set overwrite_a=True with this function')
try:
return linalg.svd(A, **kwargs)
except np.linalg.LinAlgError as exp:
from .utils import warn
if 'lapack_driver' in _get_args(linalg.svd):
warn('SVD error (%s), attempting to use GESVD instead of GESDD'
% (exp,))
return linalg.svd(A, lapack_driver='gesvd', **kwargs)
else:
raise
###############################################################################
# Back porting scipy.signal.sosfilt (0.17) and sosfiltfilt (0.18)
def _sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
"""copy of SciPy sosfiltfilt"""
sos, n_sections = _validate_sos(sos)
# `method` is "pad"...
ntaps = 2 * n_sections + 1
ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=ntaps)
# These steps follow the same form as filtfilt with modifications
zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
zi_shape = [1] * x.ndim
zi_shape[axis] = 2
zi.shape = [n_sections] + zi_shape
x_0 = axis_slice(ext, stop=1, axis=axis)
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
y_0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
y = axis_reverse(y, axis=axis)
if edge > 0:
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
"""Take a slice along axis 'axis' from 'a'"""
a_slice = [slice(None)] * a.ndim
a_slice[axis] = slice(start, stop, step)
    b = a[tuple(a_slice)]
return b
def axis_reverse(a, axis=-1):
"""Reverse the 1-d slices of `a` along axis `axis`."""
return axis_slice(a, step=-1, axis=axis)
def _validate_pad(padtype, padlen, x, axis, ntaps):
"""Helper to validate padding for filtfilt"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
return edge, ext
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def odd_ext(x, n, axis=-1):
"""Generate a new ndarray by making an odd extension of x along an axis."""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_end = axis_slice(x, start=0, stop=1, axis=axis)
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((2 * left_end - left_ext,
x,
2 * right_end - right_ext),
axis=axis)
return ext
def even_ext(x, n, axis=-1):
"""Create an ndarray that is an even extension of x along an axis."""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def const_ext(x, n, axis=-1):
"""Create an ndarray that is a constant extension of x along an axis"""
if n < 1:
return x
left_end = axis_slice(x, start=0, stop=1, axis=axis)
ones_shape = [1] * x.ndim
ones_shape[axis] = n
ones = np.ones(ones_shape, dtype=x.dtype)
left_ext = ones * left_end
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = ones * right_end
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def sosfilt_zi(sos):
"""Compute an initial state `zi` for the sosfilt function"""
from scipy.signal import lfilter_zi
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def sosfilt(sos, x, axis=-1, zi=None):
"""Filter data along one dimension using cascaded second-order sections"""
from scipy.signal import lfilter
x = np.asarray(x)
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r.' %
(axis, x.shape, n_sections, x_zi_shape))
zf = np.zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
def get_sosfiltfilt():
"""Helper to get sosfiltfilt from scipy"""
try:
from scipy.signal import sosfiltfilt
except ImportError:
sosfiltfilt = _sosfiltfilt
return sosfiltfilt
###############################################################################
# Misc utilities
def assert_true(expr, msg='False is not True'):
"""Fake assert_true without message"""
if not expr:
raise AssertionError(msg)
def assert_is(expr1, expr2, msg=None):
"""Fake assert_is without message"""
    assert_true(expr1 is expr2, msg)
def assert_is_not(expr1, expr2, msg=None):
"""Fake assert_is_not without message"""
assert_true(expr1 is not expr2, msg)
assert_raises_regex_impl = None
# from numpy 1.9.1
def assert_raises_regex(exception_class, expected_regexp,
callable_obj=None, *args, **kwargs):
"""
Fail unless an exception of class exception_class and with message that
matches expected_regexp is thrown by callable when invoked with arguments
args and keyword arguments kwargs.
Name of this function adheres to Python 3.2+ reference, but should work in
all versions down to 2.6.
"""
__tracebackhide__ = True # Hide traceback for py.test
import nose
global assert_raises_regex_impl
if assert_raises_regex_impl is None:
try:
# Python 3.2+
assert_raises_regex_impl = nose.tools.assert_raises_regex
except AttributeError:
try:
# 2.7+
assert_raises_regex_impl = nose.tools.assert_raises_regexp
except AttributeError:
# 2.6
# This class is copied from Python2.7 stdlib almost verbatim
class _AssertRaisesContext(object):
def __init__(self, expected, expected_regexp=None):
self.expected = expected
self.expected_regexp = expected_regexp
def failureException(self, msg):
return AssertionError(msg)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"{0} not raised".format(exc_name))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException(
'"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
def impl(cls, regex, callable_obj, *a, **kw):
mgr = _AssertRaisesContext(cls, regex)
if callable_obj is None:
return mgr
with mgr:
callable_obj(*a, **kw)
assert_raises_regex_impl = impl
return assert_raises_regex_impl(exception_class, expected_regexp,
callable_obj, *args, **kwargs)
def _read_volume_info(fobj):
"""An implementation of nibabel.freesurfer.io._read_volume_info, since old
versions of nibabel (<=2.1.0) don't have it.
"""
volume_info = dict()
head = np.fromfile(fobj, '>i4', 1)
    if not np.array_equal(head, [20]):  # Read two more int32 values
head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)])
if not np.array_equal(head, [2, 0, 20]):
warnings.warn("Unknown extension code.")
return volume_info
volume_info['head'] = head
for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']:
pair = fobj.readline().decode('utf-8').split('=')
if pair[0].strip() != key or len(pair) != 2:
raise IOError('Error parsing volume info.')
if key in ('valid', 'filename'):
volume_info[key] = pair[1].strip()
elif key == 'volume':
volume_info[key] = np.array(pair[1].split()).astype(int)
else:
volume_info[key] = np.array(pair[1].split()).astype(float)
# Ignore the rest
return volume_info
def _serialize_volume_info(volume_info):
"""An implementation of nibabel.freesurfer.io._serialize_volume_info, since
old versions of nibabel (<=2.1.0) don't have it."""
keys = ['head', 'valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']
diff = set(volume_info.keys()).difference(keys)
if len(diff) > 0:
raise ValueError('Invalid volume info: %s.' % diff.pop())
strings = list()
for key in keys:
if key == 'head':
if not (np.array_equal(volume_info[key], [20]) or np.array_equal(
volume_info[key], [2, 0, 20])):
warnings.warn("Unknown extension code.")
strings.append(np.array(volume_info[key], dtype='>i4').tostring())
elif key in ('valid', 'filename'):
val = volume_info[key]
strings.append('{0} = {1}\n'.format(key, val).encode('utf-8'))
elif key == 'volume':
val = volume_info[key]
strings.append('{0} = {1} {2} {3}\n'.format(
key, val[0], val[1], val[2]).encode('utf-8'))
else:
val = volume_info[key]
strings.append('{0} = {1:0.10g} {2:0.10g} {3:0.10g}\n'.format(
key.ljust(6), val[0], val[1], val[2]).encode('utf-8'))
return b''.join(strings)
|
{
"content_hash": "f00008005e0394ca38cdec1463fb02df",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 100,
"avg_line_length": 36.33487297921478,
"alnum_prop": 0.5384224242039026,
"repo_name": "jmontoyam/mne-python",
"id": "7f98674c58143c5f95b87191830757a38e91f43d",
"size": "15733",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mne/fixes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3679"
},
{
"name": "Python",
"bytes": "5539709"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import os
import sys
import six
import time
import curses
from functools import wraps
from kitchen.text.display import textual_width
from . import docs
from .objects import Controller, Color, Command
from .exceptions import TemporaryFileError
from .__version__ import __version__
def logged_in(f):
"""
Decorator for Page methods that require the user to be authenticated.
"""
@wraps(f)
def wrapped_method(self, *args, **kwargs):
if not self.reddit.is_oauth_session():
self.term.show_notification('Not logged in')
return
return f(self, *args, **kwargs)
return wrapped_method
class PageController(Controller):
character_map = {}
class Page(object):
FOOTER = None
def __init__(self, reddit, term, config, oauth):
self.reddit = reddit
self.term = term
self.config = config
self.oauth = oauth
self.content = None
self.nav = None
self.controller = None
self.active = True
self._row = 0
self._subwindows = None
def refresh_content(self, order=None, name=None):
raise NotImplementedError
def _draw_item(self, window, data, inverted):
raise NotImplementedError
def get_selected_item(self):
return self.content.get(self.nav.absolute_index)
def loop(self):
"""
Main control loop runs the following steps:
1. Re-draw the screen
2. Wait for user to press a key (includes terminal resizing)
3. Trigger the method registered to the input key
The loop will run until self.active is set to False from within one of
the methods.
"""
self.active = True
while self.active:
self.draw()
ch = self.term.stdscr.getch()
self.controller.trigger(ch)
@PageController.register(Command('EXIT'))
def exit(self):
if self.term.prompt_y_or_n('Do you really want to quit? (y/n): '):
sys.exit()
@PageController.register(Command('FORCE_EXIT'))
def force_exit(self):
sys.exit()
@PageController.register(Command('HELP'))
def show_help(self):
self.term.open_pager(docs.HELP.strip())
@PageController.register(Command('SORT_HOT'))
def sort_content_hot(self):
self.refresh_content(order='hot')
@PageController.register(Command('SORT_TOP'))
def sort_content_top(self):
if not self.content.order or 'top' not in self.content.order:
self.refresh_content(order='top')
return
choices = {
'1': 'top-hour',
'2': 'top-day',
'3': 'top-week',
'4': 'top-month',
'5': 'top-year',
'6': 'top-all'}
message = docs.TIME_ORDER_MENU.strip().splitlines()
ch = self.term.show_notification(message)
ch = six.unichr(ch)
if ch not in choices:
self.term.show_notification('Invalid option')
return
self.refresh_content(order=choices[ch])
@PageController.register(Command('SORT_RISING'))
def sort_content_rising(self):
self.refresh_content(order='rising')
@PageController.register(Command('SORT_NEW'))
def sort_content_new(self):
self.refresh_content(order='new')
@PageController.register(Command('SORT_CONTROVERSIAL'))
def sort_content_controversial(self):
if not self.content.order or 'controversial' not in self.content.order:
self.refresh_content(order='controversial')
return
choices = {
'1': 'controversial-hour',
'2': 'controversial-day',
'3': 'controversial-week',
'4': 'controversial-month',
'5': 'controversial-year',
'6': 'controversial-all'}
message = docs.TIME_ORDER_MENU.strip().splitlines()
ch = self.term.show_notification(message)
ch = six.unichr(ch)
if ch not in choices:
self.term.show_notification('Invalid option')
return
self.refresh_content(order=choices[ch])
@PageController.register(Command('MOVE_UP'))
def move_cursor_up(self):
self._move_cursor(-1)
self.clear_input_queue()
@PageController.register(Command('MOVE_DOWN'))
def move_cursor_down(self):
self._move_cursor(1)
self.clear_input_queue()
@PageController.register(Command('PAGE_UP'))
def move_page_up(self):
self._move_page(-1)
self.clear_input_queue()
@PageController.register(Command('PAGE_DOWN'))
def move_page_down(self):
self._move_page(1)
self.clear_input_queue()
@PageController.register(Command('PAGE_TOP'))
def move_page_top(self):
self._remove_cursor()
self.nav.page_index = self.content.range[0]
self.nav.cursor_index = 0
self.nav.inverted = False
self._add_cursor()
@PageController.register(Command('PAGE_BOTTOM'))
def move_page_bottom(self):
self._remove_cursor()
self.nav.page_index = self.content.range[1]
self.nav.cursor_index = 0
self.nav.inverted = True
self._add_cursor()
@PageController.register(Command('UPVOTE'))
@logged_in
def upvote(self):
data = self.get_selected_item()
if 'likes' not in data:
self.term.flash()
elif data['likes']:
with self.term.loader('Clearing vote'):
data['object'].clear_vote()
if not self.term.loader.exception:
data['likes'] = None
else:
with self.term.loader('Voting'):
data['object'].upvote()
if not self.term.loader.exception:
data['likes'] = True
@PageController.register(Command('DOWNVOTE'))
@logged_in
def downvote(self):
data = self.get_selected_item()
if 'likes' not in data:
self.term.flash()
elif data['likes'] or data['likes'] is None:
with self.term.loader('Voting'):
data['object'].downvote()
if not self.term.loader.exception:
data['likes'] = False
else:
with self.term.loader('Clearing vote'):
data['object'].clear_vote()
if not self.term.loader.exception:
data['likes'] = None
@PageController.register(Command('SAVE'))
@logged_in
def save(self):
data = self.get_selected_item()
if 'saved' not in data:
self.term.flash()
elif not data['saved']:
with self.term.loader('Saving'):
data['object'].save()
if not self.term.loader.exception:
data['saved'] = True
else:
with self.term.loader('Unsaving'):
data['object'].unsave()
if not self.term.loader.exception:
data['saved'] = False
@PageController.register(Command('LOGIN'))
def login(self):
"""
Prompt to log into the user's account, or log out of the current
account.
"""
if self.reddit.is_oauth_session():
if self.term.prompt_y_or_n('Log out? (y/n): '):
self.oauth.clear_oauth_data()
self.term.show_notification('Logged out')
else:
self.oauth.authorize()
@PageController.register(Command('DELETE'))
@logged_in
def delete_item(self):
"""
Delete a submission or comment.
"""
data = self.get_selected_item()
if data.get('author') != self.reddit.user.name:
self.term.flash()
return
prompt = 'Are you sure you want to delete this? (y/n): '
if not self.term.prompt_y_or_n(prompt):
self.term.show_notification('Canceled')
return
with self.term.loader('Deleting', delay=0):
data['object'].delete()
# Give reddit time to process the request
time.sleep(2.0)
if self.term.loader.exception is None:
self.refresh_content()
@PageController.register(Command('EDIT'))
@logged_in
def edit(self):
"""
Edit a submission or comment.
"""
data = self.get_selected_item()
if data.get('author') != self.reddit.user.name:
self.term.flash()
return
if data['type'] == 'Submission':
subreddit = self.reddit.get_subreddit(self.content.name)
content = data['text']
info = docs.SUBMISSION_EDIT_FILE.format(
content=content, name=subreddit)
elif data['type'] == 'Comment':
content = data['body']
info = docs.COMMENT_EDIT_FILE.format(content=content)
else:
self.term.flash()
return
with self.term.open_editor(info) as text:
if text == content:
self.term.show_notification('Canceled')
return
with self.term.loader('Editing', delay=0):
data['object'].edit(text)
time.sleep(2.0)
if self.term.loader.exception is None:
self.refresh_content()
else:
raise TemporaryFileError()
@PageController.register(Command('INBOX'))
@logged_in
def get_inbox(self):
"""
Checks the inbox for unread messages and displays a notification.
"""
inbox = len(list(self.reddit.get_unread(limit=1)))
message = 'New Messages' if inbox > 0 else 'No New Messages'
self.term.show_notification(message)
def clear_input_queue(self):
"""
Clear excessive input caused by the scroll wheel or holding down a key
"""
with self.term.no_delay():
while self.term.getch() != -1:
continue
def draw(self):
n_rows, n_cols = self.term.stdscr.getmaxyx()
if n_rows < self.term.MIN_HEIGHT or n_cols < self.term.MIN_WIDTH:
# TODO: Will crash when you try to navigate if the terminal is too
# small at startup because self._subwindows will never be populated
return
self._row = 0
self._draw_header()
self._draw_banner()
self._draw_content()
self._draw_footer()
self._add_cursor()
self.term.stdscr.touchwin()
self.term.stdscr.refresh()
def _draw_header(self):
n_rows, n_cols = self.term.stdscr.getmaxyx()
# Note: 2 argument form of derwin breaks PDcurses on Windows 7!
window = self.term.stdscr.derwin(1, n_cols, self._row, 0)
window.erase()
# curses.bkgd expects bytes in py2 and unicode in py3
ch, attr = str(' '), curses.A_REVERSE | curses.A_BOLD | Color.CYAN
window.bkgd(ch, attr)
sub_name = self.content.name
sub_name = sub_name.replace('/r/front', 'Front Page')
sub_name = sub_name.replace('/u/me', 'My Submissions')
sub_name = sub_name.replace('/u/saved', 'My Saved Submissions')
self.term.add_line(window, sub_name, 0, 0)
# Set the terminal title
if len(sub_name) > 50:
title = sub_name.strip('/')
title = title.rsplit('/', 1)[1]
title = title.replace('_', ' ')
else:
title = sub_name
if os.getenv('DISPLAY'):
title += ' - rtv {0}'.format(__version__)
title = self.term.clean(title)
if six.PY3:
# In py3 you can't write bytes to stdout
title = title.decode('utf-8')
title = '\x1b]2;{0}\x07'.format(title)
else:
title = b'\x1b]2;{0}\x07'.format(title)
sys.stdout.write(title)
sys.stdout.flush()
if self.reddit.user is not None:
# The starting position of the name depends on if we're converting
# to ascii or not
width = len if self.config['ascii'] else textual_width
if self.config['hide_username']:
username = "Logged in"
else:
username = self.reddit.user.name
s_col = (n_cols - width(username) - 1)
# Only print username if it fits in the empty space on the right
if (s_col - 1) >= width(sub_name):
self.term.add_line(window, username, 0, s_col)
self._row += 1
def _draw_banner(self):
n_rows, n_cols = self.term.stdscr.getmaxyx()
window = self.term.stdscr.derwin(1, n_cols, self._row, 0)
window.erase()
ch, attr = str(' '), curses.A_BOLD | Color.YELLOW
window.bkgd(ch, attr)
items = docs.BANNER.strip().split(' ')
distance = (n_cols - sum(len(t) for t in items) - 1) / (len(items) - 1)
spacing = max(1, int(distance)) * ' '
text = spacing.join(items)
self.term.add_line(window, text, 0, 0)
if self.content.order is not None:
order = self.content.order.split('-')[0]
col = text.find(order) - 3
window.chgat(0, col, 3, attr | curses.A_REVERSE)
self._row += 1
def _draw_content(self):
"""
Loop through submissions and fill up the content page.
"""
n_rows, n_cols = self.term.stdscr.getmaxyx()
window = self.term.stdscr.derwin(
n_rows - self._row - 1, n_cols, self._row, 0)
window.erase()
win_n_rows, win_n_cols = window.getmaxyx()
self._subwindows = []
page_index, cursor_index, inverted = self.nav.position
step = self.nav.step
# If not inverted, align the first submission with the top and draw
# downwards. If inverted, align the first submission with the bottom
# and draw upwards.
cancel_inverted = True
current_row = (win_n_rows - 1) if inverted else 0
available_rows = win_n_rows
top_item_height = None if inverted else self.nav.top_item_height
for data in self.content.iterate(page_index, step, win_n_cols - 2):
subwin_n_rows = min(available_rows, data['n_rows'])
subwin_inverted = inverted
if top_item_height is not None:
# Special case: draw the page as non-inverted, except for the
# top element. This element will be drawn as inverted with a
# restricted height
subwin_n_rows = min(subwin_n_rows, top_item_height)
subwin_inverted = True
top_item_height = None
subwin_n_cols = win_n_cols - data['h_offset']
start = current_row - subwin_n_rows + 1 if inverted else current_row
subwindow = window.derwin(
subwin_n_rows, subwin_n_cols, start, data['h_offset'])
attr = self._draw_item(subwindow, data, subwin_inverted)
self._subwindows.append((subwindow, attr))
available_rows -= (subwin_n_rows + 1) # Add one for the blank line
current_row += step * (subwin_n_rows + 1)
if available_rows <= 0:
# Indicate the page is full and we can keep the inverted screen.
cancel_inverted = False
break
if len(self._subwindows) == 1:
# Never draw inverted if only one subwindow. The top of the
# subwindow should always be aligned with the top of the screen.
cancel_inverted = True
if cancel_inverted and self.nav.inverted:
# In some cases we need to make sure that the screen is NOT
# inverted. Unfortunately, this currently means drawing the whole
# page over again. Could not think of a better way to pre-determine
# if the content will fill up the page, given that it is dependent
# on the size of the terminal.
self.nav.flip((len(self._subwindows) - 1))
return self._draw_content()
self._row += win_n_rows
def _draw_footer(self):
n_rows, n_cols = self.term.stdscr.getmaxyx()
window = self.term.stdscr.derwin(1, n_cols, self._row, 0)
window.erase()
ch, attr = str(' '), curses.A_REVERSE | curses.A_BOLD | Color.CYAN
window.bkgd(ch, attr)
text = self.FOOTER.strip()
self.term.add_line(window, text, 0, 0)
self._row += 1
def _add_cursor(self):
self._edit_cursor(curses.A_REVERSE)
def _remove_cursor(self):
self._edit_cursor(curses.A_NORMAL)
def _move_cursor(self, direction):
self._remove_cursor()
# Note: ACS_VLINE doesn't like changing the attribute, so disregard the
# redraw flag and opt to always redraw
valid, redraw = self.nav.move(direction, len(self._subwindows))
if not valid:
self.term.flash()
self._add_cursor()
def _move_page(self, direction):
self._remove_cursor()
valid, redraw = self.nav.move_page(direction, len(self._subwindows)-1)
if not valid:
self.term.flash()
self._add_cursor()
def _edit_cursor(self, attribute):
# Don't allow the cursor to go below page index 0
if self.nav.absolute_index < 0:
return
# Don't allow the cursor to go over the number of subwindows
# This could happen if the window is resized and the cursor index is
# pushed out of bounds
if self.nav.cursor_index >= len(self._subwindows):
self.nav.cursor_index = len(self._subwindows) - 1
window, attr = self._subwindows[self.nav.cursor_index]
if attr is not None:
attribute |= attr
n_rows, _ = window.getmaxyx()
for row in range(n_rows):
window.chgat(row, 0, 1, attribute)
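# Sketch of the subclass contract (class name and footer text are illustrative):
#
#   class MyListingPage(Page):
#       FOOTER = 'q:Quit  ?:Help'
#       def refresh_content(self, order=None, name=None):
#           ...  # rebuild self.content and reset self.nav
#       def _draw_item(self, window, data, inverted):
#           ...  # render one item into its subwindow
#
# Concrete pages must override refresh_content() and _draw_item() and set
# FOOTER; the input loop, cursor movement, and voting come from Page.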
|
{
"content_hash": "37826f1920acbc0aae3eed92adaf8a67",
"timestamp": "",
"source": "github",
"line_count": 538,
"max_line_length": 80,
"avg_line_length": 33.405204460966544,
"alnum_prop": 0.5664366792788783,
"repo_name": "shaggytwodope/rtv",
"id": "7e784c2070a03f9cfc07c6687bbab9b04db07533",
"size": "17996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rtv/page.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "2601"
},
{
"name": "HTML",
"bytes": "698"
},
{
"name": "Python",
"bytes": "275325"
}
],
"symlink_target": ""
}
|
def tree_from_traversals(preorder, inorder):
if len(preorder) != len(inorder):
raise ValueError('traversals must have the same length')
if set(preorder) != set(inorder):
raise ValueError('traversals must have the same elements')
    # The traversals are already known to share the same elements, so a
    # duplicate in one implies a duplicate in the other.
    if len(set(preorder)) != len(preorder):
        raise ValueError('traversals must contain unique items')
if not preorder:
return {}
value = preorder.pop(0)
index = inorder.index(value)
left_inorder, right_inorder = inorder[:index], inorder[index+1:]
    left_preorder = [item for item in preorder if item in left_inorder]
    right_preorder = [item for item in preorder if item in right_inorder]
left = tree_from_traversals(left_preorder, left_inorder)
right = tree_from_traversals(right_preorder, right_inorder)
return {'v': value, 'l': left, 'r': right}
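# Minimal self-check with made-up traversals (not part of the exercise tests);
# note that the function consumes `preorder` in place via pop(0).
if __name__ == '__main__':
    demo = tree_from_traversals(['a', 'i', 'x', 'f', 'r'],
                                ['i', 'a', 'f', 'x', 'r'])
    assert demo == {'v': 'a',
                    'l': {'v': 'i', 'l': {}, 'r': {}},
                    'r': {'v': 'x',
                          'l': {'v': 'f', 'l': {}, 'r': {}},
                          'r': {'v': 'r', 'l': {}, 'r': {}}}}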
|
{
"content_hash": "4890ae5d3b81c050f515f79c70c46e9a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 70,
"avg_line_length": 41.333333333333336,
"alnum_prop": 0.673963133640553,
"repo_name": "exercism/python",
"id": "310a89aeed1fb7934c9deab6c54f5d1ea58e42b3",
"size": "868",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "exercises/practice/satellite/.meta/example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "103144"
},
{
"name": "Python",
"bytes": "934764"
},
{
"name": "Shell",
"bytes": "2960"
}
],
"symlink_target": ""
}
|
"""
sphinx.environment.collectors.asset
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The image collector for sphinx.environment.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
from glob import glob
from os import path
from typing import Any, Dict, List, Set
from docutils import nodes
from docutils.nodes import Node
from docutils.utils import relative_path
from sphinx import addnodes
from sphinx.application import Sphinx
from sphinx.environment import BuildEnvironment
from sphinx.environment.collectors import EnvironmentCollector
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.i18n import get_image_filename_for_language, search_image_for_language
from sphinx.util.images import guess_mimetype
logger = logging.getLogger(__name__)
class ImageCollector(EnvironmentCollector):
"""Image files collector for sphinx.environment."""
def clear_doc(self, app: Sphinx, env: BuildEnvironment, docname: str) -> None:
env.images.purge_doc(docname)
def merge_other(self, app: Sphinx, env: BuildEnvironment,
docnames: Set[str], other: BuildEnvironment) -> None:
env.images.merge_other(docnames, other.images)
def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:
"""Process and rewrite image URIs."""
docname = app.env.docname
for node in doctree.traverse(nodes.image):
            # Map the mimetype to the corresponding image. The writer may
            # choose the best image from these candidates. The special key *
            # is set if there is only a single candidate to be used by a
            # writer. The special key ? is set for nonlocal URIs.
candidates: Dict[str, str] = {}
node['candidates'] = candidates
imguri = node['uri']
if imguri.startswith('data:'):
candidates['?'] = imguri
continue
elif imguri.find('://') != -1:
candidates['?'] = imguri
continue
if imguri.endswith(os.extsep + '*'):
                # Rewrite `node['uri']` from a path relative to the current
                # document into a path relative to the srcdir.
rel_imgpath, full_imgpath = app.env.relfn2path(imguri, docname)
node['uri'] = rel_imgpath
if app.config.language:
# Search language-specific figures at first
i18n_imguri = get_image_filename_for_language(imguri, app.env)
_, full_i18n_imgpath = app.env.relfn2path(i18n_imguri, docname)
self.collect_candidates(app.env, full_i18n_imgpath, candidates, node)
self.collect_candidates(app.env, full_imgpath, candidates, node)
else:
if app.config.language:
# substitute imguri by figure_language_filename
# (ex. foo.png -> foo.en.png)
imguri = search_image_for_language(imguri, app.env)
                # Rewrite `node['uri']` from a path relative to the current
                # document into a path relative to the srcdir.
node['uri'], _ = app.env.relfn2path(imguri, docname)
candidates['*'] = node['uri']
# map image paths to unique image names (so that they can be put
# into a single directory)
for imgpath in candidates.values():
app.env.dependencies[docname].add(imgpath)
if not os.access(path.join(app.srcdir, imgpath), os.R_OK):
logger.warning(__('image file not readable: %s') % imgpath,
location=node, type='image', subtype='not_readable')
continue
app.env.images.add_file(docname, imgpath)
def collect_candidates(self, env: BuildEnvironment, imgpath: str,
candidates: Dict[str, str], node: Node) -> None:
globbed: Dict[str, List[str]] = {}
for filename in glob(imgpath):
new_imgpath = relative_path(path.join(env.srcdir, 'dummy'),
filename)
try:
mimetype = guess_mimetype(filename)
if mimetype is None:
basename, suffix = path.splitext(filename)
mimetype = 'image/x-' + suffix[1:]
if mimetype not in candidates:
globbed.setdefault(mimetype, []).append(new_imgpath)
except OSError as err:
logger.warning(__('image file %s not readable: %s') % (filename, err),
location=node, type='image', subtype='not_readable')
for key, files in globbed.items():
candidates[key] = sorted(files, key=len)[0] # select by similarity
class DownloadFileCollector(EnvironmentCollector):
"""Download files collector for sphinx.environment."""
def clear_doc(self, app: Sphinx, env: BuildEnvironment, docname: str) -> None:
env.dlfiles.purge_doc(docname)
def merge_other(self, app: Sphinx, env: BuildEnvironment,
docnames: Set[str], other: BuildEnvironment) -> None:
env.dlfiles.merge_other(docnames, other.dlfiles)
def process_doc(self, app: Sphinx, doctree: nodes.document) -> None:
"""Process downloadable file paths. """
for node in doctree.traverse(addnodes.download_reference):
targetname = node['reftarget']
if '://' in targetname:
node['refuri'] = targetname
else:
rel_filename, filename = app.env.relfn2path(targetname, app.env.docname)
app.env.dependencies[app.env.docname].add(rel_filename)
if not os.access(filename, os.R_OK):
logger.warning(__('download file not readable: %s') % filename,
location=node, type='download', subtype='not_readable')
continue
node['filename'] = app.env.dlfiles.add_file(app.env.docname, rel_filename)
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_env_collector(ImageCollector)
app.add_env_collector(DownloadFileCollector)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
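# Illustration of the candidates mapping built above (paths invented):
# a directive such as ``.. image:: img/fig.*`` with img/fig.png and
# img/fig.pdf present would leave something like
#     node['candidates'] = {'image/png': 'img/fig.png',
#                           'image/pdf': 'img/fig.pdf'}
# on the node, while a remote URI like https://example.com/fig.png is
# recorded as {'?': 'https://example.com/fig.png'} and left untouched.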
|
{
"content_hash": "202377fee6b01cfa1590f51290c6f037",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 90,
"avg_line_length": 43.32214765100671,
"alnum_prop": 0.5888458559256391,
"repo_name": "sonntagsgesicht/regtest",
"id": "0a696aa8dee278c7d6174652c51ebe9bf066fab7",
"size": "6455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".aux/venv/lib/python3.9/site-packages/sphinx/environment/collectors/asset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13888"
}
],
"symlink_target": ""
}
|
'''
Setup script for ChirpText.
Latest version can be found at https://github.com/letuananh/chirptext
:copyright: (c) 2012 Le Tuan Anh <tuananh.ke@gmail.com>
:license: MIT, see LICENSE for more details.
'''
import io
from setuptools import setup
def read(*filenames, **kwargs):
''' Read contents of multiple files and join them together '''
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
# readme_file = 'README.rst' if os.path.isfile('README.rst') else 'README.md'
readme_file = 'README.md'
long_description = read(readme_file)
pkg_info = {}
exec(read('chirptext/__version__.py'), pkg_info)
setup(
name='chirptext', # package file name (<package-name>-version.tar.gz)
version=pkg_info['__version__'],
url=pkg_info['__url__'],
project_urls={
"Bug Tracker": "https://github.com/letuananh/chirptext/issues",
"Source Code": "https://github.com/letuananh/chirptext/"
},
keywords=["nlp", "mecab", "language", "linguistics", "vietnamese", "japanese", "chinese", "kanji", "radical"],
license=pkg_info['__license__'],
author=pkg_info['__author__'],
tests_require=[],
install_requires=[],
python_requires=">=3.5",
author_email=pkg_info['__email__'],
description=pkg_info['__description__'],
long_description=long_description,
long_description_content_type='text/markdown',
packages=['chirptext', 'chirptext.deko'],
package_data={'chirptext': ['data/luke/swadesh/*.txt',
'data/sino/*.csv']},
include_package_data=True,
platforms='any',
test_suite='test',
# Reference: https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=['Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Development Status :: {}'.format(pkg_info['__status__']),
'Natural Language :: English',
'Natural Language :: Vietnamese',
'Natural Language :: Japanese',
'Natural Language :: Chinese (Traditional)',
'Environment :: Plugins',
'Intended Audience :: Developers',
'License :: OSI Approved :: {}'.format(pkg_info['__license__']),
'Operating System :: OS Independent',
'Topic :: Text Processing',
'Topic :: Software Development :: Libraries :: Python Modules']
)
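# Typical local packaging workflow for this script (commands assumed, not
# taken from the repo's own docs):
#   python setup.py sdist bdist_wheel
#   twine upload dist/*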
|
{
"content_hash": "9d15a83c4529aa58668faa6ec69bd5fa",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 114,
"avg_line_length": 38.72,
"alnum_prop": 0.5833333333333334,
"repo_name": "letuananh/chirptext",
"id": "c0d417c4361ba986e4bb86a1d23ce5f6bbb7ca8f",
"size": "2952",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "239095"
},
{
"name": "Shell",
"bytes": "365"
}
],
"symlink_target": ""
}
|
"""
Support for Homematic switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.homematic/
"""
import logging
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import STATE_UNKNOWN
import homeassistant.components.homematic as homematic
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['homematic']
def setup_platform(hass, config, add_callback_devices, discovery_info=None):
"""Setup the Homematic switch platform."""
if discovery_info is None:
return
return homematic.setup_hmdevice_discovery_helper(HMSwitch,
discovery_info,
add_callback_devices)
class HMSwitch(homematic.HMDevice, SwitchDevice):
"""Representation of a Homematic switch."""
@property
def is_on(self):
"""Return True if switch is on."""
try:
return self._hm_get_state() > 0
except TypeError:
return False
@property
def current_power_mwh(self):
"""Return the current power usage in mWh."""
if "ENERGY_COUNTER" in self._data:
            # Dividing by a constant can never raise ZeroDivisionError; guard
            # instead against the value still being the STATE_UNKNOWN
            # placeholder set in _init_data_struct.
            try:
                return self._data["ENERGY_COUNTER"] / 1000
            except TypeError:
                return 0
return None
def turn_on(self, **kwargs):
"""Turn the switch on."""
if self.available:
self._hmdevice.on(self._channel)
def turn_off(self, **kwargs):
"""Turn the switch off."""
if self.available:
self._hmdevice.off(self._channel)
def _check_hm_to_ha_object(self):
"""Check if possible to use the Homematic object as this HA type."""
from pyhomematic.devicetypes.actors import Dimmer, Switch
# Check compatibility from HMDevice
if not super(HMSwitch, self)._check_hm_to_ha_object():
return False
# Check if the Homematic device is correct for this HA device
if isinstance(self._hmdevice, Switch):
return True
if isinstance(self._hmdevice, Dimmer):
return True
_LOGGER.critical("This %s can't be use as switch", self._name)
return False
def _init_data_struct(self):
"""Generate a data dict (self._data) from the Homematic metadata."""
from pyhomematic.devicetypes.actors import Dimmer,\
Switch, SwitchPowermeter
super(HMSwitch, self)._init_data_struct()
# Use STATE
if isinstance(self._hmdevice, Switch):
self._state = "STATE"
# Use LEVEL
if isinstance(self._hmdevice, Dimmer):
self._state = "LEVEL"
# Need sensor values for SwitchPowermeter
if isinstance(self._hmdevice, SwitchPowermeter):
for node in self._hmdevice.SENSORNODE:
self._data.update({node: STATE_UNKNOWN})
# Add state to data dict
if self._state:
_LOGGER.debug("%s init data dict with main node '%s'", self._name,
self._state)
self._data.update({self._state: STATE_UNKNOWN})
else:
_LOGGER.critical("Can't correctly init light %s.", self._name)
|
{
"content_hash": "fbad162fb1945c879325375f354b1c9b",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 78,
"avg_line_length": 32.27450980392157,
"alnum_prop": 0.5993317132442284,
"repo_name": "Julian/home-assistant",
"id": "79d8960cf59c2c0a5f09564bacd6bc8784a3e45e",
"size": "3292",
"binary": false,
"copies": "1",
"ref": "refs/heads/py2",
"path": "homeassistant/components/switch/homematic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1354942"
},
{
"name": "Python",
"bytes": "2755966"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6430"
}
],
"symlink_target": ""
}
|
import unittest
from docopt import docopt
from djset.commands import COMMAND, _parse_args, _create_djset
from djset.djset import DjSecret, DjConfig
from nose2.tools import params
class TestCommand(unittest.TestCase):
def test_parse_argv(self):
args = docopt(COMMAND % {'cmd':'test'}, argv=['add', 'key1=value1', '--global', '--settings=some.settings'])
self.assertEqual(
args,
{'--global': True,
'--name': None,
'--settings': 'some.settings',
'<key>': None,
'<key>=<value>': 'key1=value1',
'add': True,
'remove': False}
)
class TestParseArgs(unittest.TestCase):
add = {'--global': False,
'<key>': None, #remove
'<key>=<value>': 'key=value', #add
'add': True,
'remove': False}
add_result = {
'func': 'set',
'args': ('key', 'value'),
'kwargs': {'glob': False},
}
add_global = {'--global': True,
'<key>': None, #remove
'<key>=<value>': 'key=value', #add
'add': True,
'remove': False}
add_global_result = {
'func': 'set',
'args': ('key', 'value'),
'kwargs': {'glob': True},
}
add_name = {'--global': True,
'--name': 'djstest',
'<key>': None, #remove
'<key>=<value>': 'key=value', #add
'add': True,
'remove': False}
add_name_result = {'func': 'set',
'args': ('key', 'value'),
'kwargs': {'glob': True},
}
add_invalid = {'--global': False,
'<key>': None, #remove
'<key>=<value>': 'key=', #add
'add': True,
'remove': False}
add_invalid_result = None
remove = {'--global': False,
'<key>': 'key', #remove
'<key>=<value>': None, #add
'add': False,
'remove': True}
remove_result = {
'func': 'remove',
'args': ('key', ),
'kwargs': {'glob': False},
}
@params(
(add, add_result),
(add_global, add_global_result),
(add_name, add_name_result),
(add_invalid, add_invalid_result),
(remove, remove_result),
)
def test_parse_args(self, args, result):
func, args, kwargs = _parse_args(args, DjSecret)
if not func:
self.assertEqual(func, result)
else:
self.assertEqual(func.__name__, result['func'])
self.assertEqual(args, result['args'])
self.assertEqual(kwargs, result['kwargs'])
@params(DjSecret, DjConfig)
def test_create_djset(self, cls):
args = {'--global': True,
'--name': 'djstest',
'<key>': None, #remove
'<key>=<value>': 'key=value', #add
'add': True,
'remove': False}
d = _create_djset(args, cls)
self.assertEqual(d.name, 'djstest')
|
{
"content_hash": "22b29bae99b7d433efa289864ebc2346",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 116,
"avg_line_length": 26.923809523809524,
"alnum_prop": 0.4998231340643792,
"repo_name": "bretth/djset",
"id": "224a98fe204580f406b1bbd262edf5b870e9be05",
"size": "2827",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38461"
},
{
"name": "Shell",
"bytes": "254"
}
],
"symlink_target": ""
}
|
""" Store packages in S3 """
import calendar
import logging
import posixpath
import time
from contextlib import contextmanager
from hashlib import md5
from urllib import urlopen, quote
import boto.s3
from boto.cloudfront import Distribution
from boto.s3.key import Key
import boto.s3.connection
from pyramid.httpexceptions import HTTPFound
from pyramid.settings import asbool
from .base import IStorage
from pypicloud.models import Package
from pypicloud.util import parse_filename, getdefaults
LOG = logging.getLogger(__name__)
SUPPORTED_CALLING_FORMATS = {
'SubdomainCallingFormat': boto.s3.connection.SubdomainCallingFormat,
'VHostCallingFormat': boto.s3.connection.VHostCallingFormat,
'OrdinaryCallingFormat': boto.s3.connection.OrdinaryCallingFormat,
'ProtocolIndependentOrdinaryCallingFormat':
boto.s3.connection.ProtocolIndependentOrdinaryCallingFormat
}
class S3Storage(IStorage):
""" Storage backend that uses S3 """
test = False
def __init__(self, request=None, bucket=None, expire_after=None,
bucket_prefix=None, prepend_hash=None, redirect_urls=None,
use_sse=False,
**kwargs):
super(S3Storage, self).__init__(request, **kwargs)
self.bucket = bucket
self.expire_after = expire_after
self.bucket_prefix = bucket_prefix
self.prepend_hash = prepend_hash
self.redirect_urls = redirect_urls
self.use_sse = use_sse
@classmethod
def configure(cls, settings):
kwargs = super(S3Storage, cls).configure(settings)
kwargs['expire_after'] = int(getdefaults(
settings, 'storage.expire_after', 'aws.expire_after', 60 * 60 *
24))
kwargs['bucket_prefix'] = getdefaults(
settings, 'storage.prefix', 'aws.prefix', '')
kwargs['prepend_hash'] = asbool(getdefaults(
settings, 'storage.prepend_hash', 'aws.prepend_hash', True))
access_key = getdefaults(settings, 'storage.access_key',
'aws.access_key', None)
secret_key = getdefaults(settings, 'storage.secret_key',
'aws.secret_key', None)
host = getdefaults(settings, 'storage.host',
'aws.host', boto.s3.connection.NoHostProvided)
is_secure = getdefaults(settings, 'storage.is_secure',
'aws.is_secure', True)
kwargs['use_sse'] = asbool(getdefaults(
settings, 'storage.server_side_encryption',
'aws.server_side_encryption', False))
calling_format = settings.get('storage.calling_format',
'SubdomainCallingFormat')
kwargs['redirect_urls'] = asbool(settings.get('storage.redirect_urls',
False))
if calling_format not in SUPPORTED_CALLING_FORMATS:
raise ValueError("Only {0} are supported for calling_format"
.format(', '.join(SUPPORTED_CALLING_FORMATS)))
# We used to always use boto.connect_s3 because it can look up buckets
# in any region. New regions require AWS4-HMAC-SHA256, which boto can
# only do with a region connection. So if the region is specified (and
# it must be for new regions like eu-central-1), use a region
# connection.
location = settings.get('storage.region')
if location is None:
s3conn = boto.connect_s3(
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
host=host,
is_secure=asbool(is_secure),
calling_format=SUPPORTED_CALLING_FORMATS[calling_format]())
else:
s3conn = boto.s3.connect_to_region(location,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key)
aws_bucket = getdefaults(settings, 'storage.bucket', 'aws.bucket',
None)
if aws_bucket is None:
raise ValueError("You must specify the 'storage.bucket'")
try:
bucket = s3conn.get_bucket(aws_bucket)
except boto.exception.S3ResponseError as e:
if e.error_code != 'NoSuchBucket':
if e.status == 301:
LOG.warn("Bucket found in different region. Check that "
"the S3 bucket specified in 'storage.bucket' is "
"in 'storage.region'")
raise
location = getdefaults(settings, 'storage.region', 'aws.region',
boto.s3.connection.Location.DEFAULT)
LOG.info("Creating S3 bucket %s in region %s", aws_bucket,
location)
bucket = s3conn.create_bucket(aws_bucket, location=location)
kwargs['bucket'] = bucket
return kwargs
def calculate_path(self, package):
""" Calculates the path of a package """
path = package.name + '/' + package.filename
if self.prepend_hash:
m = md5()
m.update(package.filename)
prefix = m.digest().encode('hex')[:4]
path = prefix + '/' + path
return path
def get_path(self, package):
""" Get the fully-qualified bucket path for a package """
if 'path' not in package.data:
filename = self.calculate_path(package)
package.data['path'] = self.bucket_prefix + filename
return package.data['path']
def list(self, factory=Package):
keys = self.bucket.list(self.bucket_prefix)
for key in keys:
# Boto doesn't send down metadata from bucket.list()
# so we are forced to retrieve each key individually.
key = self.bucket.get_key(key.key)
filename = posixpath.basename(key.key)
name = key.get_metadata('name')
version = key.get_metadata('version')
summary = key.get_metadata('summary')
# We used to not store metadata. This is for backwards
# compatibility
if name is None or version is None:
try:
name, version = parse_filename(filename)
except ValueError:
LOG.warning("S3 file %s has no package name", key.key)
continue
last_modified = boto.utils.parse_ts(key.last_modified)
pkg = factory(name, version, filename, last_modified, summary,
path=key.key)
yield pkg
def _generate_url(self, package):
""" Generate a signed url to the S3 file """
key = Key(self.bucket, self.get_path(package))
return key.generate_url(self.expire_after)
def get_url(self, package):
if self.redirect_urls:
return super(S3Storage, self).get_url(package)
else:
return self._generate_url(package)
def download_response(self, package):
return HTTPFound(location=self._generate_url(package))
def upload(self, package, data):
key = Key(self.bucket)
key.key = self.get_path(package)
key.set_metadata('name', package.name)
key.set_metadata('version', package.version)
# The summary can be None if this package was fetched from upstream
if package.summary:
key.set_metadata('summary', package.summary)
# S3 doesn't support uploading from a non-file stream, so we have to
# read it into memory :(
key.set_contents_from_string(data.read(), encrypt_key=self.use_sse)
def delete(self, package):
path = self.get_path(package)
key = Key(self.bucket)
key.key = path
key.delete()
@contextmanager
def open(self, package):
url = self._generate_url(package)
handle = urlopen(url)
try:
yield handle
finally:
handle.close()
class CloudFrontS3Storage(S3Storage):
""" Storage backend that uses S3 and CloudFront """
def __init__(self, request=None, bucket=None, expire_after=None, bucket_prefix=None,
prepend_hash=None, cloud_front_domain=None, cloud_front_key_file=None,
cloud_front_key_string=None, cloud_front_key_id=None, **kwargs):
super(CloudFrontS3Storage, self).__init__(request, bucket, expire_after, bucket_prefix, prepend_hash, **kwargs)
self.cloud_front_domain = cloud_front_domain
self.cloud_front_key_file = cloud_front_key_file
self.cloud_front_key_id = cloud_front_key_id
self.cloud_front_key_string = cloud_front_key_string
self.distribution = Distribution()
@classmethod
def configure(cls, settings):
kwargs = super(CloudFrontS3Storage, cls).configure(settings)
kwargs['cloud_front_domain'] = getdefaults(
settings, 'storage.cloud_front_domain', 'aws.cloud_front_domain', '')
kwargs['cloud_front_key_file'] = getdefaults(
settings, 'storage.cloud_front_key_file', 'aws.cloud_front_key_file', None)
kwargs['cloud_front_key_string'] = getdefaults(
settings, 'storage.cloud_front_key_string', 'aws.cloud_front_key_string', None)
kwargs['cloud_front_key_id'] = getdefaults(
settings, 'storage.cloud_front_key_id', 'aws.cloud_front_key_id', '')
return kwargs
def _generate_url(self, package):
""" Get the fully-qualified CloudFront path for a package """
path = self.calculate_path(package)
url = self.cloud_front_domain + '/' + quote(path)
if self.cloud_front_key_file or self.cloud_front_key_string:
expire_time = int(time.time() + self.expire_after)
url = self.distribution.create_signed_url(
url, self.cloud_front_key_id, expire_time, private_key_file=self.cloud_front_key_file,
private_key_string=self.cloud_front_key_string)
return url
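# Hypothetical config fragment wiring up the CloudFront backend (all values
# invented for illustration; the keys mirror configure() above):
#
#   storage.bucket = my-packages
#   storage.region = eu-central-1
#   storage.expire_after = 86400
#   storage.cloud_front_domain = https://d1234.cloudfront.net
#   storage.cloud_front_key_id = APKAEXAMPLE
#   storage.cloud_front_key_file = /etc/pypicloud/cf.pem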
|
{
"content_hash": "17a320b5f929960bf5a6efd7f4ee2967",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 119,
"avg_line_length": 41.73662551440329,
"alnum_prop": 0.5969236836915796,
"repo_name": "rubikloud/pypicloud",
"id": "19cfd69d773174658a2143fdf08ae2e15e51a858",
"size": "10142",
"binary": false,
"copies": "1",
"ref": "refs/heads/rubikloud",
"path": "pypicloud/storage/s3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "397"
},
{
"name": "HTML",
"bytes": "24656"
},
{
"name": "JavaScript",
"bytes": "26246"
},
{
"name": "Python",
"bytes": "287319"
},
{
"name": "Shell",
"bytes": "2143"
}
],
"symlink_target": ""
}
|
import hashlib
from django import template
from string import Template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from urlparse import urlparse
from django.shortcuts import redirect, render, get_object_or_404
from gamernews.apps.core.models import Account
register = template.Library()
@register.simple_tag
def active(request, name, by_path=False):
if by_path:
path = name
else:
path = reverse(name)
if request.path == path:
return ' active '
return ''
@register.filter
def adjust_for_pagination(value, page):
value, page = int(value), int(page)
adjusted_value = value + ((page - 1) * settings.RESULTS_PER_PAGE)
return adjusted_value
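# Hypothetical template usage (markup invented for illustration):
#
#   {% load blobs_tag %}
#   <li class="{% active request 'home' %}"><a href="/">Home</a></li>
#   Result #{{ forloop.counter|adjust_for_pagination:page }}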
|
{
"content_hash": "6205ac51f1799e86c75f3769de58ba38",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 24.833333333333332,
"alnum_prop": 0.7610738255033557,
"repo_name": "underlost/GamerNews",
"id": "e107a286440d5acefcf5c4b9fb88409969d23938",
"size": "745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gamernews/apps/news/templatetags/blobs_tag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "226951"
},
{
"name": "JavaScript",
"bytes": "135586"
},
{
"name": "Python",
"bytes": "124181"
}
],
"symlink_target": ""
}
|
from heatclient.common import utils
from oslo_utils import encodeutils
from six.moves.urllib import parse
from heatclient.openstack.common.apiclient import base
class ResourceType(base.Resource):
def __repr__(self):
return "<ResourceType %s>" % self._info
def data(self, **kwargs):
return self.manager.data(self, **kwargs)
def _add_details(self, info):
self.resource_type = info
class ResourceTypeManager(base.BaseManager):
resource_class = ResourceType
KEY = 'resource_types'
def list(self, **kwargs):
"""Get a list of resource types.
:rtype: list of :class:`ResourceType`
"""
url = '/%s' % self.KEY
params = {}
if 'filters' in kwargs:
filters = kwargs.pop('filters')
params.update(filters)
url += '?%s' % parse.urlencode(params, True)
return self._list(url, self.KEY)
def get(self, resource_type):
"""Get the details for a specific resource_type.
:param resource_type: name of the resource type to get the details for
"""
url_str = '/%s/%s' % (
self.KEY,
parse.quote(encodeutils.safe_encode(resource_type), ''))
resp = self.client.get(url_str)
body = utils.get_response_body(resp)
return body
def generate_template(self, resource_type, template_type='cfn'):
url_str = '/%s/%s/template' % (
self.KEY,
parse.quote(encodeutils.safe_encode(resource_type), ''))
if template_type:
url_str += '?%s' % parse.urlencode(
{'template_type': template_type}, True)
resp = self.client.get(url_str)
body = utils.get_response_body(resp)
return body
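# Rough usage sketch (construction of the HTTP client is assumed):
#
#   mgr = ResourceTypeManager(http_client)
#   for rt in mgr.list(filters={'name': 'OS::Nova::Server'}):
#       print(rt)
#   body = mgr.generate_template('OS::Nova::Server', template_type='hot')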
|
{
"content_hash": "4d858fe987c7b805dc74f1dc80517dad",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 78,
"avg_line_length": 30.372881355932204,
"alnum_prop": 0.5825892857142857,
"repo_name": "ecerulm/python-heatclient",
"id": "952cc3c5ba0df76cf0f5af74b42b4f7cf6f14e5c",
"size": "2366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heatclient/v1/resource_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "592557"
},
{
"name": "Shell",
"bytes": "5103"
}
],
"symlink_target": ""
}
|
class FenTree(object):
    def __init__(self, array):
        self.array, self.tree = [0] * len(array), [0] * (len(array) + 1)
        for i in range(len(array)):
            self.update(i, array[i])
    def get_parent(self, child):
        # Dropping the lowest set bit moves up to the parent range.
        return child - (child & -child)
    def get_next(self, index):
        # Adding the lowest set bit moves to the next range to update.
        return index + (index & -index)
    def update(self, index, item):
        current, self.array[index] = self.array[index], item
        item -= current  # only the delta is propagated into the tree
        index += 1  # the tree is 1-indexed
        while index <= len(self.array):
            self.tree[index] += item
            index = self.get_next(index)
    def prefix_sum(self, index):
        index += 1
        total = 0
        while index > 0:
            total += self.tree[index]
            index = self.get_parent(index)
        return total
    def range_sum(self, x, y):
        return self.prefix_sum(max(x, y)) - self.prefix_sum(min(x, y) - 1)
    def describe(self):
        print('ARRAY =>\t', self.array)
        print('Binary Indexed Tree =>\t', self.tree)
if __name__ == '__main__':
    tree = FenTree([3, 2, -1, 6, 5, 4])
    # tree = FenTree([int(i) for i in input('Enter the array (space-separated integers): ').split()])
    tree.describe()
    tree.update(4, 8)  # replaces 5 with 8 in the list given to the fenwick tree
    tree.describe()
    print(tree.range_sum(1, 5))  # returns 2 - 1 + 6 + 8 + 4 = 19
    print(tree.prefix_sum(5))  # returns 3 + 2 - 1 + 6 + 8 + 4 = 22
|
{
"content_hash": "67b49b56e2a4fb24af0a9717d3d6561c",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 102,
"avg_line_length": 29.2,
"alnum_prop": 0.5966514459665144,
"repo_name": "rtkasodariya/interview",
"id": "794610ad8a46ae35b3bc2c63fae2e1c2b60d96b6",
"size": "1849",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/tree/fenwick_tree.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "844400"
},
{
"name": "Python",
"bytes": "53011"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from avame.schedule import Schedule
def test_schedule_seconds():
count = 2
for it in Schedule().every(1).second:
print(it)
count -= 1
if count <= 0:
break
def test_singular_unit():
with pytest.raises(AssertionError):
for it in Schedule().every(2).second:
pass
with pytest.raises(AssertionError):
for it in Schedule().every(2).minute:
pass
with pytest.raises(AssertionError):
for it in Schedule().every(2).hour:
pass
with pytest.raises(AssertionError):
for it in Schedule().every(2).day:
pass
|
{
"content_hash": "cca07ab8193d10ff120866122dd66a2a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 82,
"avg_line_length": 22.393939393939394,
"alnum_prop": 0.6062246278755075,
"repo_name": "nickchen-mitac/fork",
"id": "ab4425e9efd4cd01e1d92057726f8bc3f50faa16",
"size": "763",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_job_schedule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10442"
},
{
"name": "HTML",
"bytes": "11410"
},
{
"name": "JavaScript",
"bytes": "25325"
},
{
"name": "Python",
"bytes": "445788"
},
{
"name": "Shell",
"bytes": "65"
}
],
"symlink_target": ""
}
|
import os
from . import base
class DF(base.ThreadedPollText):
"""
Disk Free Widget
By default the widget only displays if the space is less than warn_space.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('partition', '/', 'the partition to check space'),
('warn_color', 'ff0000', 'Warning color'),
('warn_space', 2, 'Warning space in scale defined by the ``measure`` option.'),
('visible_on_warn', True, 'Only display if warning'),
('measure', "G", "Measurement (G, M, B)"),
('format', '{p} ({uf}{m})',
'String format (p: partition, s: size, '
'f: free space, uf: user free space, m: measure)'),
('update_interval', 60, 'The update interval.'),
]
measures = {"G": 1024 * 1024 * 1024,
"M": 1024 * 1024,
"B": 1024}
def __init__(self, **config):
base.ThreadedPollText.__init__(self, **config)
self.add_defaults(DF.defaults)
self.user_free = 0
self.calc = self.measures[self.measure]
def draw(self):
if self.user_free <= self.warn_space:
self.layout.colour = self.warn_color
else:
self.layout.colour = self.foreground
base.ThreadedPollText.draw(self)
def poll(self):
statvfs = os.statvfs(self.partition)
size = statvfs.f_frsize * statvfs.f_blocks / self.calc
free = statvfs.f_frsize * statvfs.f_bfree / self.calc
self.user_free = statvfs.f_frsize * statvfs.f_bavail / self.calc
if self.visible_on_warn and self.user_free >= self.warn_space:
text = ""
else:
text = self.format.format(p=self.partition, s=size, f=free,
uf=self.user_free, m=self.measure)
return text
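# Hypothetical usage in a qtile bar config (values invented):
#
#   widget.DF(partition='/home', warn_space=5, visible_on_warn=False,
#             format='{p}: {uf}{m} free')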
|
{
"content_hash": "768549ac41dd368203933df2a29f996c",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 87,
"avg_line_length": 34.320754716981135,
"alnum_prop": 0.565695437053326,
"repo_name": "himaaaatti/qtile",
"id": "e5d729dde2c48d34b05dd15e9042266517971c13",
"size": "2956",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "libqtile/widget/df.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "3598"
},
{
"name": "Makefile",
"bytes": "1351"
},
{
"name": "Python",
"bytes": "951823"
},
{
"name": "Shell",
"bytes": "2870"
}
],
"symlink_target": ""
}
|
from bottle import route, run, request
from fancytext import fancy
@route('/fancy', method='POST')
def fancify():
plain_text = request.POST['text']
return fancy(plain_text)
run(host='localhost', port=8080)
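# Assumed manual smoke test once the server is running:
#   curl -d 'text=hello world' http://localhost:8080/fancy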
|
{
"content_hash": "54652517117dfe1ccb5c37ca77f0d19f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 38,
"avg_line_length": 21.7,
"alnum_prop": 0.7050691244239631,
"repo_name": "mamachanko/fancytext-api",
"id": "f9aaf267116356b099f728d5fa13644eae8e4f66",
"size": "242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fancytext-api/fancytext-api.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10692"
},
{
"name": "Shell",
"bytes": "6466"
}
],
"symlink_target": ""
}
|
'''
Agent Target
'''
import sys, os
import re, random
from furl import *
from urllib.parse import urlparse
import time, signal
from multiprocessing import Process
import stomp
from daemonize import Daemonize
from os.path import basename
current_dir = os.path.basename(os.getcwd())
if current_dir == "agents":
sys.path.append('../')
if current_dir == "Kurgan-Framework":
sys.path.append('./')
from libs.STOMP import STOMP_Connector
from libs.FIPA import FIPAMessage
from libs.Transport import Transport
import libs.Utils as utl
import libs.Target as target
import config as cf
from actions.targetAction import TargetAction
AGENT_NAME="AgentTarget"
AGENT_ID="2"
ALL_AGENTS="All"
urlTarget = ''
def set_url_base(url):
mAction = TargetAction()
mAction.set_baseUrlTarget(url)
mAgent = Transport()
mAction.set_mAgent(mAgent)
ret = mAction.requestInfo('inform','Master-Agent','target-agent','ok')
mAction.receive_pkg(mAgent)
def agent_status():
mAgent = Transport()
mAction = TargetAction()
mAction.set_mAgent(mAgent)
ret = mAction.requestInfo('request','All','agent-status','*')
mAction.receive_pkg(mAgent)
def agent_quit():
mAction = TargetAction()
mAgent = Transport()
mAction.set_mAgent(mAgent)
mAction.deregister()
sys.exit(0)
def handler(signum, frame):
print("Exiting of execution...", signum);
agent_quit()
def runAgent():
global urlTarget
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
print("Loading " + AGENT_NAME + " ...\n")
mAgent = Transport()
mAction = TargetAction()
mAction.set_mAgent(mAgent)
mAction.registerAgent()
fm = FIPAMessage()
agent_id=[]
while True:
time.sleep(1)
rcv = mAgent.receive_data_from_agents()
if not len(rcv) == 0:
fm.parse_pkg(rcv)
match = re.search("(agent-name(.)+)(\(\w+\))", rcv)
if match:
field = match.group(3).lstrip()
                match2 = re.search(r"\w+", field)
if match2:
agt_id = match2.group(0)
if agt_id in agent_id:
continue
else:
print("agentID: ", agt_id)
agent_id.append(agt_id)
print(rcv)
mAction.add_available_agent(agt_id)
break
else:
print(rcv)
print("Available Agents: ", mAction.get_available_agents())
mAgent = Transport()
mAction = TargetAction()
mAction.set_mAgent(mAgent)
mAction.cfp("run-target", "*")
msg_id=[]
while True:
time.sleep(1)
rcv = mAgent.receive_data_from_agents()
if not len(rcv) == 0:
fm.parse_pkg(rcv)
match = re.search("message-id:(.\w+\-\w+)", rcv)
if match:
message_id = match.group(1).lstrip()
if message_id in msg_id:
continue
else:
msg_id.append(message_id)
print(rcv)
mAgent.zera_buff()
break
else:
print(rcv)
    p = Process(target=set_url_base, args=(urlTarget,))  # dummy request for loop
p.start()
p.join(3)
def show_help():
print("Kurgan MultiAgent Framework version ", cf.VERSION)
print("Usage: python3 " + __file__ + " <background|foreground>")
print("\nExample:\n")
print("python3 " + __file__ + " background")
exit(0)
def run(background=False):
if background == True:
pid = os.fork()
if pid:
p = basename(sys.argv[0])
myname, file_extension = os.path.splitext(p)
pidfile = '/tmp/%s.pid' % myname
daemon = Daemonize(app=myname, pid=pidfile, action=runAgent)
daemon.start()
else:
runAgent()
def main(args):
global urlTarget
urlTarget = "http://www.kurgan.com.br/"
    if args[0] == "foreground":
        run(background=False)
    elif args[0] == "background":
        run(background=True)
    else:
        show_help()
if __name__ == '__main__':
if len(sys.argv) == 1:
show_help()
else:
main(sys.argv[1:])
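# Assumed invocation (a STOMP broker and the master agent must already be
# running; see show_help() above for the supported modes):
#   python3 agentTarget.py background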
|
{
"content_hash": "2ba61c339982a512833444fdcaa54065",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 74,
"avg_line_length": 26.189349112426036,
"alnum_prop": 0.5519656574785359,
"repo_name": "glaudsonml/kurgan-ai",
"id": "b5be5f9027fb09208f19c15d48ef7a0458923d31",
"size": "4449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agents/agentTarget.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "122729"
},
{
"name": "HTML",
"bytes": "48894"
},
{
"name": "JavaScript",
"bytes": "1589671"
},
{
"name": "PHP",
"bytes": "72064"
},
{
"name": "Python",
"bytes": "211839"
},
{
"name": "Shell",
"bytes": "5722"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
try:
from urllib import parse as urlparse
except ImportError:
import urlparse # Python 2
import copy
from django.forms import widgets
from django.utils.html import escape
from django.utils.functional import Promise
from django.utils.safestring import mark_safe
from django_countries.conf import settings
COUNTRY_CHANGE_HANDLER = (
"var e=document.getElementById('flag_' + this.id); "
"if (e) e.src = '%s'"
".replace('{code}', this.value.toLowerCase() || '__')"
".replace('{code_upper}', this.value.toUpperCase() || '__');"
)
class LazyChoicesMixin(object):
@property
def choices(self):
"""
When it's time to get the choices, if it was a lazy then figure it out
now and memoize the result.
"""
if isinstance(self._choices, Promise):
self._choices = list(self._choices)
return self._choices
@choices.setter
def choices(self, value):
self._set_choices(value)
def _set_choices(self, value):
self._choices = value
class LazySelectMixin(LazyChoicesMixin):
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.attrs = self.attrs.copy()
obj.choices = copy.copy(self._choices)
memo[id(self)] = obj
return obj
class LazySelect(LazySelectMixin, widgets.Select):
"""
A form Select widget that respects choices being a lazy object.
"""
class LazySelectMultiple(LazySelectMixin, widgets.SelectMultiple):
"""
A form SelectMultiple widget that respects choices being a lazy object.
"""
class CountrySelectWidget(LazySelect):
def __init__(self, *args, **kwargs):
self.layout = kwargs.pop('layout', None) or (
'{widget}<img class="country-select-flag" id="{flag_id}" '
'style="margin: 6px 4px 0" '
'src="{country.flag}">'
)
super(CountrySelectWidget, self).__init__(*args, **kwargs)
def render(self, name, value, attrs=None, renderer=None):
from django_countries.fields import Country
attrs = attrs or {}
widget_id = attrs and attrs.get('id')
if widget_id:
flag_id = 'flag_{id}'.format(id=widget_id)
attrs['onchange'] = COUNTRY_CHANGE_HANDLER % urlparse.urljoin(
settings.STATIC_URL, settings.COUNTRIES_FLAG_URL)
else:
flag_id = ''
# Renderer argument only added in 1.11, keeping backwards compat.
kwargs = {'renderer': renderer} if renderer else {}
widget_render = super(CountrySelectWidget, self).render(
name, value, attrs, **kwargs)
if isinstance(value, Country):
country = value
else:
country = Country(value or '__')
with country.escape:
return mark_safe(self.layout.format(
widget=widget_render, country=country,
flag_id=escape(flag_id)))
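# Hypothetical form wiring (model and field names invented for illustration):
#
#   from django import forms
#   from django_countries.fields import CountryField
#
#   class ProfileForm(forms.ModelForm):
#       class Meta:
#           model = Profile
#           fields = ('country',)
#           widgets = {'country': CountrySelectWidget()}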
|
{
"content_hash": "8c1ef46a5c9f2cd3f5c3636a1b1553ad",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 78,
"avg_line_length": 30.916666666666668,
"alnum_prop": 0.6152291105121294,
"repo_name": "schinckel/django-countries",
"id": "f6b39db15a4bb0ee6f1578a30bac586e670d3dbf",
"size": "2968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_countries/widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11991"
},
{
"name": "Python",
"bytes": "109450"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
try:
from library.modules.bigip_device_info import Parameters
from library.modules.bigip_device_info import VirtualAddressesFactManager
from library.modules.bigip_device_info import ArgumentSpec
from library.modules.bigip_device_info import ModuleManager
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_device_info import Parameters
from ansible.modules.network.f5.bigip_device_info import VirtualAddressesFactManager
from ansible.modules.network.f5.bigip_device_info import ArgumentSpec
from ansible.modules.network.f5.bigip_device_info import ModuleManager
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class FakeVirtualAddress:
def __init__(self, *args, **kwargs):
attrs = kwargs.pop('params', {})
for key, value in iteritems(attrs):
setattr(self, key, value)
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
gather_subset=['virtual-servers'],
)
p = Parameters(params=args)
assert p.gather_subset == ['virtual-servers']
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
try:
self.p1 = patch('library.modules.bigip_device_info.modules_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = ['ltm', 'gtm', 'asm']
except Exception:
self.p1 = patch('ansible.modules.network.f5.bigip_device_info.modules_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = ['ltm', 'gtm', 'asm']
def tearDown(self):
self.p1.stop()
    def test_get_virtual_address_facts(self, *args):
set_module_args(dict(
gather_subset=['virtual-addresses'],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
fixture1 = load_fixture('load_ltm_virtual_address_collection_1.json')
collection = fixture1['items']
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
tm = VirtualAddressesFactManager(module=module)
tm.read_collection_from_device = Mock(return_value=collection)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['queried'] is True
assert 'virtual_addresses' in results
assert len(results['virtual_addresses']) > 0
|
{
"content_hash": "dc5a045b590a652621b08e88bf5a757f",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 95,
"avg_line_length": 30.424,
"alnum_prop": 0.6571128056797265,
"repo_name": "thaim/ansible",
"id": "2291c02a2abcdfe09aa4f1bee9d65dabe4504c75",
"size": "3960",
"binary": false,
"copies": "21",
"ref": "refs/heads/fix-broken-link",
"path": "test/units/modules/network/f5/test_bigip_device_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
import torch
from mmdet.utils import util_mixins
class SamplingResult(util_mixins.NiceRepr):
"""Bbox sampling result.
Example:
>>> # xdoctest: +IGNORE_WANT
>>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA
>>> self = SamplingResult.random(rng=10)
>>> print(f'self = {self}')
self = <SamplingResult({
'neg_bboxes': torch.Size([12, 4]),
'neg_inds': tensor([ 0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12]),
'num_gts': 4,
'pos_assigned_gt_inds': tensor([], dtype=torch.int64),
'pos_bboxes': torch.Size([0, 4]),
'pos_inds': tensor([], dtype=torch.int64),
'pos_is_gt': tensor([], dtype=torch.uint8)
})>
"""
def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
gt_flags):
self.pos_inds = pos_inds
self.neg_inds = neg_inds
self.pos_bboxes = bboxes[pos_inds]
self.neg_bboxes = bboxes[neg_inds]
self.pos_is_gt = gt_flags[pos_inds]
self.num_gts = gt_bboxes.shape[0]
self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
if gt_bboxes.numel() == 0:
# hack for index error case
assert self.pos_assigned_gt_inds.numel() == 0
self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)
else:
if len(gt_bboxes.shape) < 2:
gt_bboxes = gt_bboxes.view(-1, 4)
self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds.long(), :]
if assign_result.labels is not None:
self.pos_gt_labels = assign_result.labels[pos_inds]
else:
self.pos_gt_labels = None
@property
def bboxes(self):
"""torch.Tensor: concatenated positive and negative boxes"""
return torch.cat([self.pos_bboxes, self.neg_bboxes])
def to(self, device):
"""Change the device of the data inplace.
Example:
>>> self = SamplingResult.random()
>>> print(f'self = {self.to(None)}')
>>> # xdoctest: +REQUIRES(--gpu)
>>> print(f'self = {self.to(0)}')
"""
_dict = self.__dict__
for key, value in _dict.items():
if isinstance(value, torch.Tensor):
_dict[key] = value.to(device)
return self
def __nice__(self):
data = self.info.copy()
data['pos_bboxes'] = data.pop('pos_bboxes').shape
data['neg_bboxes'] = data.pop('neg_bboxes').shape
parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())]
body = ' ' + ',\n '.join(parts)
return '{\n' + body + '\n}'
@property
def info(self):
"""Returns a dictionary of info about the object."""
return {
'pos_inds': self.pos_inds,
'neg_inds': self.neg_inds,
'pos_bboxes': self.pos_bboxes,
'neg_bboxes': self.neg_bboxes,
'pos_is_gt': self.pos_is_gt,
'num_gts': self.num_gts,
'pos_assigned_gt_inds': self.pos_assigned_gt_inds,
}
@classmethod
def random(cls, rng=None, **kwargs):
"""
Args:
rng (None | int | numpy.random.RandomState): seed or state.
kwargs (keyword arguments):
- num_preds: number of predicted boxes
- num_gts: number of true boxes
- p_ignore (float): probability of a predicted box assigned to \
an ignored truth.
- p_assigned (float): probability of a predicted box not being \
assigned.
- p_use_label (float | bool): with labels or not.
Returns:
:obj:`SamplingResult`: Randomly generated sampling result.
Example:
>>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA
>>> self = SamplingResult.random()
>>> print(self.__dict__)
"""
from mmdet.core.bbox import demodata
from mmdet.core.bbox.assigners.assign_result import AssignResult
from mmdet.core.bbox.samplers.random_sampler import RandomSampler
rng = demodata.ensure_rng(rng)
# make probabilistic?
num = 32
pos_fraction = 0.5
neg_pos_ub = -1
assign_result = AssignResult.random(rng=rng, **kwargs)
# Note we could just compute an assignment
bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng)
gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng)
if rng.rand() > 0.2:
# sometimes algorithms squeeze their data, be robust to that
gt_bboxes = gt_bboxes.squeeze()
bboxes = bboxes.squeeze()
if assign_result.labels is None:
gt_labels = None
else:
gt_labels = None # todo
if gt_labels is None:
add_gt_as_proposals = False
else:
add_gt_as_proposals = True # make probabilistic?
sampler = RandomSampler(
num,
pos_fraction,
neg_pos_ub=neg_pos_ub,
add_gt_as_proposals=add_gt_as_proposals,
rng=rng)
self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
return self
|
{
"content_hash": "ba6685a31b10b41c1c495953771d9a3f",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 81,
"avg_line_length": 35.13815789473684,
"alnum_prop": 0.5381014791237596,
"repo_name": "open-mmlab/mmdetection",
"id": "11a02c5d95a4d633dfea26df7fb3e440494a8be7",
"size": "5389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmdet/core/bbox/samplers/sampling_result.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2540"
},
{
"name": "Python",
"bytes": "4811377"
},
{
"name": "Shell",
"bytes": "47911"
}
],
"symlink_target": ""
}
|
from TASSELpy.java.util.List import List
## Python-specific class used to apply functional programming
# filtering to a wrapped java list
class FilterList(List):
"""
Python-specific class used to apply functional programming
filtering to a wrapped java list
"""
def __init__(self, *args, **kwargs):
super(FilterList,self).__init__(*args,**kwargs)
## Iterates through the list, but only returns items for which a
# function evaluates True
# @param filterFunc A function that accepts the list generic type as an argument
# and returns true or false
# @return An iterator that only returns objects evaluating true
def filterIterator(self, filterFunc):
"""
Iterates through the list, but only returns items for which a function
evaluates True.
Arguments:
filterFunc -- A function that accepts the list generic type as an argument and
returns true or false
Returns:
An iterator that only returns objects evaluating true
"""
## Loop through the iterator
for item in self.iterator():
if filterFunc(item):
yield item
## Enumerates the list, returning index and items for which a function
# evaluates True
# @param filterFunc A function that accepts the list generic type as an argument
# and returns true or false
# @return An iterator of (index, object) for objects evaluating true
def filterEnumerator(self, filterFunc):
"""
Enumerates the list, returning index and items for which a function
evaluates True
Arguments:
filterFunc -- A function that accepts the list generic type as an argument and
returns true or false
Returns:
An iterator of (index, object) for objects evaluating true
"""
## Loop through the iterator
ind = 0
for item in self.iterator():
if filterFunc(item):
yield (ind, item)
ind += 1
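## Usage sketch (hypothetical wrapped list `my_list`), illustrating both
# filtering helpers described above; a sketch only, not part of the wrapper:
#
#   evens = [x for x in my_list.filterIterator(lambda n: n % 2 == 0)]
#   indexed_evens = list(my_list.filterEnumerator(lambda n: n % 2 == 0))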
|
{
"content_hash": "ad5ddd5265f2cca078a0e3c610eb09cc",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 86,
"avg_line_length": 34.08196721311475,
"alnum_prop": 0.6339586339586339,
"repo_name": "er432/TASSELpy",
"id": "c914e994409e2a5ef16eec5010b7b4a5f78eeeed",
"size": "2079",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "TASSELpy/java/util/FilterList.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "947691"
},
{
"name": "Shell",
"bytes": "6705"
}
],
"symlink_target": ""
}
|
"""A limited-functionality wallet, which may replace a real wallet in tests"""
from copy import deepcopy
from decimal import Decimal
from enum import Enum
from typing import (
Any,
List,
Optional,
)
from test_framework.address import (
base58_to_byte,
create_deterministic_address_bcrt1_p2tr_op_true,
key_to_p2pkh,
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
output_key_to_p2tr,
)
from test_framework.descriptors import descsum_create
from test_framework.key import (
ECKey,
compute_xonly_pubkey,
)
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
tx_from_hex,
)
from test_framework.script import (
CScript,
LegacySignatureHash,
LEAF_VERSION_TAPSCRIPT,
OP_NOP,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
taproot_construct,
)
from test_framework.script_util import (
key_to_p2pk_script,
key_to_p2pkh_script,
key_to_p2sh_p2wpkh_script,
key_to_p2wpkh_script,
keyhash_to_p2pkh_script,
scripthash_to_p2sh_script,
)
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
)
DEFAULT_FEE = Decimal("0.0001")
class MiniWalletMode(Enum):
"""Determines the transaction type the MiniWallet is creating and spending.
For most purposes, the default mode ADDRESS_OP_TRUE should be sufficient;
it simply uses a fixed bech32m P2TR address whose coins are spent with a
witness stack of OP_TRUE, i.e. following an anyone-can-spend policy.
However, if the transactions need to be modified by the user (e.g. prepending
scriptSig for testing opcodes that are activated by a soft-fork), or the txs
should contain an actual signature, the raw modes RAW_OP_TRUE and RAW_P2PK
can be useful. Summary of modes:
| output | | tx is | can modify | needs
mode | description | address | standard | scriptSig | signing
----------------+-------------------+-----------+----------+------------+----------
ADDRESS_OP_TRUE | anyone-can-spend | bech32m | yes | no | no
RAW_OP_TRUE | anyone-can-spend | - (raw) | no | yes | no
RAW_P2PK | pay-to-public-key | - (raw) | yes | yes | yes
"""
ADDRESS_OP_TRUE = 1
RAW_OP_TRUE = 2
RAW_P2PK = 3
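# Usage sketch (hypothetical `test_node` handle), creating one wallet per mode
# from the table above; a sketch only, not part of the framework itself:
#
#   wallet = MiniWallet(test_node)  # default mode: ADDRESS_OP_TRUE
#   raw_wallet = MiniWallet(test_node, mode=MiniWalletMode.RAW_OP_TRUE)
#   p2pk_wallet = MiniWallet(test_node, mode=MiniWalletMode.RAW_P2PK)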
class MiniWallet:
def __init__(self, test_node, *, mode=MiniWalletMode.ADDRESS_OP_TRUE):
self._test_node = test_node
self._utxos = []
self._mode = mode
assert isinstance(mode, MiniWalletMode)
if mode == MiniWalletMode.RAW_OP_TRUE:
self._scriptPubKey = bytes(CScript([OP_TRUE]))
elif mode == MiniWalletMode.RAW_P2PK:
# use simple deterministic private key (k=1)
self._priv_key = ECKey()
self._priv_key.set((1).to_bytes(32, 'big'), True)
pub_key = self._priv_key.get_pubkey()
self._scriptPubKey = key_to_p2pk_script(pub_key.get_bytes())
elif mode == MiniWalletMode.ADDRESS_OP_TRUE:
self._address, self._internal_key = create_deterministic_address_bcrt1_p2tr_op_true()
self._scriptPubKey = bytes.fromhex(self._test_node.validateaddress(self._address)['scriptPubKey'])
def _create_utxo(self, *, txid, vout, value, height):
return {"txid": txid, "vout": vout, "value": value, "height": height}
def _bulk_tx(self, tx, target_weight):
"""Pad a transaction with extra outputs until it reaches a target weight (or higher).
        The transaction is padded in-place; nothing is returned.
"""
tx.vout.append(CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN, b'a'])))
dummy_vbytes = (target_weight - tx.get_weight() + 3) // 4
tx.vout[-1].scriptPubKey = CScript([OP_RETURN, b'a' * dummy_vbytes])
# Lower bound should always be off by at most 3
assert_greater_than_or_equal(tx.get_weight(), target_weight)
# Higher bound should always be off by at most 3 + 12 weight (for encoding the length)
assert_greater_than_or_equal(target_weight + 15, tx.get_weight())
def get_balance(self):
return sum(u['value'] for u in self._utxos)
def rescan_utxos(self):
"""Drop all utxos and rescan the utxo set"""
self._utxos = []
res = self._test_node.scantxoutset(action="start", scanobjects=[self.get_descriptor()])
assert_equal(True, res['success'])
for utxo in res['unspents']:
self._utxos.append(self._create_utxo(txid=utxo["txid"], vout=utxo["vout"], value=utxo["amount"], height=utxo["height"]))
def scan_tx(self, tx):
"""Scan the tx and adjust the internal list of owned utxos"""
for spent in tx["vin"]:
# Mark spent. This may happen when the caller has ownership of a
# utxo that remained in this wallet. For example, by passing
# mark_as_spent=False to get_utxo or by using an utxo returned by a
# create_self_transfer* call.
try:
self.get_utxo(txid=spent["txid"], vout=spent["vout"])
except StopIteration:
pass
for out in tx['vout']:
if out['scriptPubKey']['hex'] == self._scriptPubKey.hex():
self._utxos.append(self._create_utxo(txid=tx["txid"], vout=out["n"], value=out["value"], height=0))
def sign_tx(self, tx, fixed_length=True):
"""Sign tx that has been created by MiniWallet in P2PK mode"""
assert_equal(self._mode, MiniWalletMode.RAW_P2PK)
(sighash, err) = LegacySignatureHash(CScript(self._scriptPubKey), tx, 0, SIGHASH_ALL)
assert err is None
# for exact fee calculation, create only signatures with fixed size by default (>49.89% probability):
# 65 bytes: high-R val (33 bytes) + low-S val (32 bytes)
# with the DER header/skeleton data of 6 bytes added, this leads to a target size of 71 bytes
der_sig = b''
while not len(der_sig) == 71:
der_sig = self._priv_key.sign_ecdsa(sighash)
if not fixed_length:
break
tx.vin[0].scriptSig = CScript([der_sig + bytes(bytearray([SIGHASH_ALL]))])
tx.rehash()
def generate(self, num_blocks, **kwargs):
"""Generate blocks with coinbase outputs to the internal address, and call rescan_utxos"""
blocks = self._test_node.generatetodescriptor(num_blocks, self.get_descriptor(), **kwargs)
# Calling rescan_utxos here makes sure that after a generate the utxo
# set is in a clean state. For example, the wallet will update
# - if the caller consumed utxos, but never used them
# - if the caller sent a transaction that is not mined or got rbf'd
# - after block re-orgs
# - the utxo height for mined mempool txs
# - However, the wallet will not consider remaining mempool txs
self.rescan_utxos()
return blocks
def get_scriptPubKey(self):
return self._scriptPubKey
def get_descriptor(self):
return descsum_create(f'raw({self._scriptPubKey.hex()})')
def get_address(self):
assert_equal(self._mode, MiniWalletMode.ADDRESS_OP_TRUE)
return self._address
def get_utxo(self, *, txid: str = '', vout: Optional[int] = None, mark_as_spent=True) -> dict:
"""
Returns a utxo and marks it as spent (pops it from the internal list)
Args:
txid: get the first utxo we find from a specific transaction
"""
self._utxos = sorted(self._utxos, key=lambda k: (k['value'], -k['height'])) # Put the largest utxo last
if txid:
utxo_filter: Any = filter(lambda utxo: txid == utxo['txid'], self._utxos)
else:
utxo_filter = reversed(self._utxos) # By default the largest utxo
if vout is not None:
utxo_filter = filter(lambda utxo: vout == utxo['vout'], utxo_filter)
index = self._utxos.index(next(utxo_filter))
if mark_as_spent:
return self._utxos.pop(index)
else:
return self._utxos[index]
def get_utxos(self, *, mark_as_spent=True):
"""Returns the list of all utxos and optionally mark them as spent"""
utxos = deepcopy(self._utxos)
if mark_as_spent:
self._utxos = []
return utxos
def send_self_transfer(self, *, from_node, **kwargs):
"""Call create_self_transfer and send the transaction."""
tx = self.create_self_transfer(**kwargs)
self.sendrawtransaction(from_node=from_node, tx_hex=tx['hex'])
return tx
def send_to(self, *, from_node, scriptPubKey, amount, fee=1000):
"""
Create and send a tx with an output to a given scriptPubKey/amount,
plus a change output to our internal address. To keep things simple, a
fixed fee given in Satoshi is used.
Note that this method fails if there is no single internal utxo
available that can cover the cost for the amount and the fixed fee
(the utxo with the largest value is taken).
Returns a tuple (txid, n) referring to the created external utxo outpoint.
"""
tx = self.create_self_transfer(fee_rate=0)["tx"]
assert_greater_than_or_equal(tx.vout[0].nValue, amount + fee)
tx.vout[0].nValue -= (amount + fee) # change output -> MiniWallet
tx.vout.append(CTxOut(amount, scriptPubKey)) # arbitrary output -> to be returned
txid = self.sendrawtransaction(from_node=from_node, tx_hex=tx.serialize().hex())
return txid, 1
def send_self_transfer_multi(self, *, from_node, **kwargs):
"""Call create_self_transfer_multi and send the transaction."""
tx = self.create_self_transfer_multi(**kwargs)
self.sendrawtransaction(from_node=from_node, tx_hex=tx["hex"])
return tx
def create_self_transfer_multi(
self,
*,
utxos_to_spend: Optional[List[dict]] = None,
num_outputs=1,
amount_per_output=0,
sequence=0,
fee_per_output=1000,
target_weight=0
):
"""
Create and return a transaction that spends the given UTXOs and creates a
certain number of outputs with equal amounts. The output amounts can be
set by amount_per_output or automatically calculated with a fee_per_output.
"""
utxos_to_spend = utxos_to_spend or [self.get_utxo()]
sequence = [sequence] * len(utxos_to_spend) if type(sequence) is int else sequence
assert_equal(len(utxos_to_spend), len(sequence))
# create simple tx template (1 input, 1 output)
tx = self.create_self_transfer(
fee_rate=0,
utxo_to_spend=utxos_to_spend[0])["tx"]
# duplicate inputs, witnesses and outputs
tx.vin = [deepcopy(tx.vin[0]) for _ in range(len(utxos_to_spend))]
for txin, seq in zip(tx.vin, sequence):
txin.nSequence = seq
tx.wit.vtxinwit = [deepcopy(tx.wit.vtxinwit[0]) for _ in range(len(utxos_to_spend))]
tx.vout = [deepcopy(tx.vout[0]) for _ in range(num_outputs)]
# adapt input prevouts
for i, utxo in enumerate(utxos_to_spend):
tx.vin[i] = CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']))
# adapt output amounts (use fixed fee per output)
inputs_value_total = sum([int(COIN * utxo['value']) for utxo in utxos_to_spend])
outputs_value_total = inputs_value_total - fee_per_output * num_outputs
for o in tx.vout:
o.nValue = amount_per_output or (outputs_value_total // num_outputs)
if target_weight:
self._bulk_tx(tx, target_weight)
txid = tx.rehash()
return {
"new_utxos": [self._create_utxo(
txid=txid,
vout=i,
value=Decimal(tx.vout[i].nValue) / COIN,
height=0,
) for i in range(len(tx.vout))],
"txid": txid,
"hex": tx.serialize().hex(),
"tx": tx,
}
def create_self_transfer(self, *, fee_rate=Decimal("0.003"), fee=Decimal("0"), utxo_to_spend=None, locktime=0, sequence=0, target_weight=0):
"""Create and return a tx with the specified fee. If fee is 0, use fee_rate, where the resulting fee may be exact or at most one satoshi higher than needed."""
utxo_to_spend = utxo_to_spend or self.get_utxo()
assert fee_rate >= 0
assert fee >= 0
if self._mode in (MiniWalletMode.RAW_OP_TRUE, MiniWalletMode.ADDRESS_OP_TRUE):
vsize = Decimal(104) # anyone-can-spend
elif self._mode == MiniWalletMode.RAW_P2PK:
vsize = Decimal(168) # P2PK (73 bytes scriptSig + 35 bytes scriptPubKey + 60 bytes other)
else:
assert False
send_value = utxo_to_spend["value"] - (fee or (fee_rate * vsize / 1000))
assert send_value > 0
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(utxo_to_spend['txid'], 16), utxo_to_spend['vout']), nSequence=sequence)]
tx.vout = [CTxOut(int(COIN * send_value), bytearray(self._scriptPubKey))]
tx.nLockTime = locktime
if self._mode == MiniWalletMode.RAW_P2PK:
self.sign_tx(tx)
elif self._mode == MiniWalletMode.RAW_OP_TRUE:
tx.vin[0].scriptSig = CScript([OP_NOP] * 43) # pad to identical size
elif self._mode == MiniWalletMode.ADDRESS_OP_TRUE:
tx.wit.vtxinwit = [CTxInWitness()]
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE]), bytes([LEAF_VERSION_TAPSCRIPT]) + self._internal_key]
else:
assert False
assert_equal(tx.get_vsize(), vsize)
if target_weight:
self._bulk_tx(tx, target_weight)
tx_hex = tx.serialize().hex()
new_utxo = self._create_utxo(txid=tx.rehash(), vout=0, value=send_value, height=0)
return {"txid": new_utxo["txid"], "wtxid": tx.getwtxid(), "hex": tx_hex, "tx": tx, "new_utxo": new_utxo}
def sendrawtransaction(self, *, from_node, tx_hex, maxfeerate=0, **kwargs):
txid = from_node.sendrawtransaction(hexstring=tx_hex, maxfeerate=maxfeerate, **kwargs)
self.scan_tx(from_node.decoderawtransaction(tx_hex))
return txid
def send_self_transfer_chain(self, *, from_node, chain_length, utxo_to_spend=None):
"""Create and send a "chain" of chain_length transactions. The nth transaction in
the chain is a child of the n-1th transaction and parent of the n+1th transaction.
Returns the chaintip (nth) utxo
"""
chaintip_utxo = utxo_to_spend or self.get_utxo()
for _ in range(chain_length):
chaintip_utxo = self.send_self_transfer(utxo_to_spend=chaintip_utxo, from_node=from_node)["new_utxo"]
return chaintip_utxo
def getnewdestination(address_type='bech32m'):
"""Generate a random destination of the specified type and return the
corresponding public key, scriptPubKey and address. Supported types are
'legacy', 'p2sh-segwit', 'bech32' and 'bech32m'. Can be used when a random
destination is needed, but no compiled wallet is available (e.g. as
replacement to the getnewaddress/getaddressinfo RPCs)."""
key = ECKey()
key.generate()
pubkey = key.get_pubkey().get_bytes()
if address_type == 'legacy':
scriptpubkey = key_to_p2pkh_script(pubkey)
address = key_to_p2pkh(pubkey)
elif address_type == 'p2sh-segwit':
scriptpubkey = key_to_p2sh_p2wpkh_script(pubkey)
address = key_to_p2sh_p2wpkh(pubkey)
elif address_type == 'bech32':
scriptpubkey = key_to_p2wpkh_script(pubkey)
address = key_to_p2wpkh(pubkey)
elif address_type == 'bech32m':
tap = taproot_construct(compute_xonly_pubkey(key.get_bytes())[0])
pubkey = tap.output_pubkey
scriptpubkey = tap.scriptPubKey
address = output_key_to_p2tr(pubkey)
else:
assert False
return pubkey, scriptpubkey, address
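# Usage sketch for the helper above; a hedged illustration, not part of the
# framework itself:
#
#   pubkey, spk, address = getnewdestination('bech32')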
def address_to_scriptpubkey(address):
"""Converts a given address to the corresponding output script (scriptPubKey)."""
payload, version = base58_to_byte(address)
if version == 111: # testnet pubkey hash
return keyhash_to_p2pkh_script(payload)
elif version == 196: # testnet script hash
return scripthash_to_p2sh_script(payload)
# TODO: also support other address formats
else:
assert False
def make_chain(node, address, privkeys, parent_txid, parent_value, n=0, parent_locking_script=None, fee=DEFAULT_FEE):
"""Build a transaction that spends parent_txid.vout[n] and produces one output with
amount = parent_value with a fee deducted.
Return tuple (CTransaction object, raw hex, nValue, scriptPubKey of the output created).
"""
inputs = [{"txid": parent_txid, "vout": n}]
my_value = parent_value - fee
outputs = {address : my_value}
rawtx = node.createrawtransaction(inputs, outputs)
prevtxs = [{
"txid": parent_txid,
"vout": n,
"scriptPubKey": parent_locking_script,
"amount": parent_value,
}] if parent_locking_script else None
signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=privkeys, prevtxs=prevtxs)
assert signedtx["complete"]
tx = tx_from_hex(signedtx["hex"])
return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex())
def create_child_with_parents(node, address, privkeys, parents_tx, values, locking_scripts, fee=DEFAULT_FEE):
"""Creates a transaction that spends the first output of each parent in parents_tx."""
num_parents = len(parents_tx)
total_value = sum(values)
inputs = [{"txid": tx.rehash(), "vout": 0} for tx in parents_tx]
outputs = {address : total_value - fee}
rawtx_child = node.createrawtransaction(inputs, outputs)
prevtxs = []
for i in range(num_parents):
prevtxs.append({"txid": parents_tx[i].rehash(), "vout": 0, "scriptPubKey": locking_scripts[i], "amount": values[i]})
signedtx_child = node.signrawtransactionwithkey(hexstring=rawtx_child, privkeys=privkeys, prevtxs=prevtxs)
assert signedtx_child["complete"]
return signedtx_child["hex"]
def create_raw_chain(node, first_coin, address, privkeys, chain_length=25):
"""Helper function: create a "chain" of chain_length transactions. The nth transaction in the
chain is a child of the n-1th transaction and parent of the n+1th transaction.
"""
parent_locking_script = None
txid = first_coin["txid"]
chain_hex = []
chain_txns = []
value = first_coin["amount"]
for _ in range(chain_length):
(tx, txhex, value, parent_locking_script) = make_chain(node, address, privkeys, txid, value, 0, parent_locking_script)
txid = tx.rehash()
chain_hex.append(txhex)
chain_txns.append(tx)
return (chain_hex, chain_txns)
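# Usage sketch (hypothetical `node`, `first_coin`, `address`, `privkeys`),
# building and broadcasting a 25-deep chain; assumes first_coin carries "txid"
# and "amount" keys as used above:
#
#   chain_hex, chain_txns = create_raw_chain(node, first_coin, address, privkeys)
#   for hex_tx in chain_hex:
#       node.sendrawtransaction(hex_tx)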
|
{
"content_hash": "c68d20d712be61dbb3bb3ca345f27aef",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 167,
"avg_line_length": 43.27107061503417,
"alnum_prop": 0.6238681827753211,
"repo_name": "fujicoin/fujicoin",
"id": "a7f5d00262a6b17ef7210e325ec3cc3e070c9e97",
"size": "19211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/test_framework/wallet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "Batchfile",
"bytes": "13"
},
{
"name": "C",
"bytes": "1226556"
},
{
"name": "C++",
"bytes": "10236550"
},
{
"name": "CMake",
"bytes": "29182"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "1740"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "547"
},
{
"name": "M4",
"bytes": "221436"
},
{
"name": "Makefile",
"bytes": "147554"
},
{
"name": "Objective-C++",
"bytes": "5500"
},
{
"name": "Python",
"bytes": "2974091"
},
{
"name": "QMake",
"bytes": "438"
},
{
"name": "Sage",
"bytes": "58534"
},
{
"name": "Scheme",
"bytes": "26044"
},
{
"name": "Shell",
"bytes": "168383"
}
],
"symlink_target": ""
}
|
import socket
from django.conf import settings
from django.db.models.signals import post_save, post_delete
from django.db.transaction import atomic
from django.utils import timezone
from desecapi import metrics
from desecapi.models import RRset, RR, Domain
from desecapi.pdns import (
_pdns_post,
NSLORD,
NSMASTER,
_pdns_delete,
_pdns_patch,
_pdns_put,
pdns_id,
construct_catalog_rrset,
)
class PDNSChangeTracker:
"""
Hooks up to model signals to maintain two sets:
- `domain_additions`: set of added domains
- `domain_deletions`: set of deleted domains
The two sets are guaranteed to be disjoint.
Hooks up to model signals to maintain exactly three sets per domain:
- `rr_set_additions`: set of added RR sets
- `rr_set_modifications`: set of modified RR sets
- `rr_set_deletions`: set of deleted RR sets
`additions` and `deletions` are guaranteed to be disjoint:
- If an item is in the set of additions while being deleted, it is removed from `rr_set_additions`.
- If an item is in the set of deletions while being added, it is removed from `rr_set_deletions`.
`modifications` and `deletions` are guaranteed to be disjoint.
- If an item is in the set of deletions while being modified, an exception is raised.
- If an item is in the set of modifications while being deleted, it is removed from `rr_set_modifications`.
    Note that every change tracker object will track all changes to the models across threads.
    To avoid side effects, it is recommended that in each Django process only one change
    tracker runs at a time, i.e. do not use them in parallel (e.g., in a multi-threading
    scenario) and do not nest them.
"""
_active_change_trackers = 0
class PDNSChange:
"""
A reversible, atomic operation against the powerdns API.
"""
def __init__(self, domain_name):
self._domain_name = domain_name
@property
def domain_name(self):
return self._domain_name
@property
def domain_name_normalized(self):
return self._domain_name + "."
@property
def domain_pdns_id(self):
return pdns_id(self._domain_name)
@property
def axfr_required(self):
raise NotImplementedError()
def pdns_do(self):
raise NotImplementedError()
def api_do(self):
raise NotImplementedError()
def update_catalog(self, delete=False):
content = _pdns_patch(
NSMASTER,
"/zones/" + pdns_id(settings.CATALOG_ZONE),
{
"rrsets": [
construct_catalog_rrset(zone=self.domain_name, delete=delete)
]
},
)
metrics.get("desecapi_pdns_catalog_updated").inc()
return content
class CreateDomain(PDNSChange):
@property
def axfr_required(self):
return True
def pdns_do(self):
_pdns_post(
NSLORD,
"/zones?rrsets=false",
{
"name": self.domain_name_normalized,
"kind": "MASTER",
"dnssec": True,
"nsec3param": "1 0 0 -",
"nameservers": settings.DEFAULT_NS,
"rrsets": [
{
"name": self.domain_name_normalized,
"type": "SOA",
# SOA RRset TTL: 300 (used as TTL for negative replies including NSEC3 records)
"ttl": 300,
"records": [
{
# SOA refresh: 1 day (only needed for nslord --> nsmaster replication after RRSIG rotation)
# SOA retry = 1h
# SOA expire: 4 weeks (all signatures will have expired anyways)
# SOA minimum: 3600 (for CDS, CDNSKEY, DNSKEY, NSEC3PARAM)
"content": "get.desec.io. get.desec.io. 1 86400 3600 2419200 3600",
"disabled": False,
}
],
}
],
},
)
_pdns_post(
NSMASTER,
"/zones?rrsets=false",
{
"name": self.domain_name_normalized,
"kind": "SLAVE",
"masters": [socket.gethostbyname("nslord")],
"master_tsig_key_ids": ["default"],
},
)
self.update_catalog()
def api_do(self):
rr_set = RRset(
domain=Domain.objects.get(name=self.domain_name),
type="NS",
subname="",
ttl=settings.DEFAULT_NS_TTL,
)
rr_set.save()
rrs = [RR(rrset=rr_set, content=ns) for ns in settings.DEFAULT_NS]
RR.objects.bulk_create(rrs) # One INSERT
def __str__(self):
return "Create Domain %s" % self.domain_name
class DeleteDomain(PDNSChange):
@property
def axfr_required(self):
return False
def pdns_do(self):
_pdns_delete(NSLORD, "/zones/" + self.domain_pdns_id)
_pdns_delete(NSMASTER, "/zones/" + self.domain_pdns_id)
self.update_catalog(delete=True)
def api_do(self):
pass
def __str__(self):
return "Delete Domain %s" % self.domain_name
class CreateUpdateDeleteRRSets(PDNSChange):
def __init__(self, domain_name, additions, modifications, deletions):
super().__init__(domain_name)
self._additions = additions
self._modifications = modifications
self._deletions = deletions
@property
def axfr_required(self):
return True
def pdns_do(self):
data = {
"rrsets": [
{
"name": RRset.construct_name(subname, self._domain_name),
"type": type_,
"ttl": 1, # some meaningless integer required by pdns's syntax
"changetype": "REPLACE", # don't use "DELETE" due to desec-stack#220, PowerDNS/pdns#7501
"records": [],
}
for type_, subname in self._deletions
]
+ [
{
"name": RRset.construct_name(subname, self._domain_name),
"type": type_,
"ttl": RRset.objects.values_list("ttl", flat=True).get(
domain__name=self._domain_name, type=type_, subname=subname
),
"changetype": "REPLACE",
"records": [
{"content": rr.content, "disabled": False}
for rr in RR.objects.filter(
rrset__domain__name=self._domain_name,
rrset__type=type_,
rrset__subname=subname,
)
],
}
for type_, subname in (self._additions | self._modifications)
- self._deletions
]
}
if data["rrsets"]:
_pdns_patch(NSLORD, "/zones/" + self.domain_pdns_id, data)
def api_do(self):
pass
def __str__(self):
return (
"Update RRsets of %s: additions=%s, modifications=%s, deletions=%s"
% (
self.domain_name,
list(self._additions),
list(self._modifications),
list(self._deletions),
)
)
def __init__(self):
self._domain_additions = set()
self._domain_deletions = set()
self._rr_set_additions = {}
self._rr_set_modifications = {}
self._rr_set_deletions = {}
self.transaction = None
@classmethod
def track(cls, f):
"""
Execute function f with the change tracker.
:param f: Function to be tracked for PDNS-relevant changes.
:return: Returns the return value of f.
"""
with cls():
return f()
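    # Usage sketch (hypothetical `domain` instance), equivalent to running the
    # callable inside `with PDNSChangeTracker():`:
    #
    #   PDNSChangeTracker.track(lambda: domain.save())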
def _manage_signals(self, method):
if method not in ["connect", "disconnect"]:
raise ValueError()
getattr(post_save, method)(
self._on_rr_post_save, sender=RR, dispatch_uid=self.__module__
)
getattr(post_delete, method)(
self._on_rr_post_delete, sender=RR, dispatch_uid=self.__module__
)
getattr(post_save, method)(
self._on_rr_set_post_save, sender=RRset, dispatch_uid=self.__module__
)
getattr(post_delete, method)(
self._on_rr_set_post_delete, sender=RRset, dispatch_uid=self.__module__
)
getattr(post_save, method)(
self._on_domain_post_save, sender=Domain, dispatch_uid=self.__module__
)
getattr(post_delete, method)(
self._on_domain_post_delete, sender=Domain, dispatch_uid=self.__module__
)
def __enter__(self):
PDNSChangeTracker._active_change_trackers += 1
assert PDNSChangeTracker._active_change_trackers == 1, (
"Nesting %s is not supported." % self.__class__.__name__
)
self._domain_additions = set()
self._domain_deletions = set()
self._rr_set_additions = {}
self._rr_set_modifications = {}
self._rr_set_deletions = {}
self._manage_signals("connect")
self.transaction = atomic()
self.transaction.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
PDNSChangeTracker._active_change_trackers -= 1
self._manage_signals("disconnect")
if exc_type:
# An exception occurred inside our context, exit db transaction and dismiss pdns changes
self.transaction.__exit__(exc_type, exc_val, exc_tb)
return
# TODO introduce two phase commit protocol
changes = self._compute_changes()
axfr_required = set()
for change in changes:
try:
change.pdns_do()
change.api_do()
if change.axfr_required:
axfr_required.add(change.domain_name)
except Exception as e:
self.transaction.__exit__(type(e), e, e.__traceback__)
exc = ValueError(
f"For changes {list(map(str, changes))}, {type(e)} occurred during {change}: {str(e)}"
)
raise exc from e
self.transaction.__exit__(None, None, None)
for name in axfr_required:
_pdns_put(NSMASTER, "/zones/%s/axfr-retrieve" % pdns_id(name))
Domain.objects.filter(name__in=axfr_required).update(published=timezone.now())
def _compute_changes(self):
changes = []
for domain_name in self._domain_deletions:
# discard any RR set modifications
self._rr_set_additions.pop(domain_name, None)
self._rr_set_modifications.pop(domain_name, None)
self._rr_set_deletions.pop(domain_name, None)
changes.append(PDNSChangeTracker.DeleteDomain(domain_name))
for domain_name in self._rr_set_additions.keys() | self._domain_additions:
if domain_name in self._domain_additions:
changes.append(PDNSChangeTracker.CreateDomain(domain_name))
additions = self._rr_set_additions.get(domain_name, set())
modifications = self._rr_set_modifications.get(domain_name, set())
deletions = self._rr_set_deletions.get(domain_name, set())
assert not (additions & deletions)
assert not (modifications & deletions)
# Due to disjoint guarantees with `deletions`, we have four types of RR sets:
# (1) purely added RR sets
# (2) purely modified RR sets
# (3) added and modified RR sets
# (4) purely deleted RR sets
# We send RR sets to PDNS if one of the following conditions holds:
# (a) RR set was added and has at least one RR
# (b) RR set was modified
# (c) RR set was deleted
# Conditions (b) and (c) are already covered in the modifications and deletions list,
# we filter the additions list to remove newly-added, but empty RR sets
additions -= {
(type_, subname)
for (type_, subname) in additions
if not RR.objects.filter(
rrset__domain__name=domain_name,
rrset__type=type_,
rrset__subname=subname,
).exists()
}
if additions | modifications | deletions:
changes.append(
PDNSChangeTracker.CreateUpdateDeleteRRSets(
domain_name, additions, modifications, deletions
)
)
return changes
def _rr_set_updated(self, rr_set: RRset, deleted=False, created=False):
if self._rr_set_modifications.get(rr_set.domain.name, None) is None:
self._rr_set_additions[rr_set.domain.name] = set()
self._rr_set_modifications[rr_set.domain.name] = set()
self._rr_set_deletions[rr_set.domain.name] = set()
additions = self._rr_set_additions[rr_set.domain.name]
modifications = self._rr_set_modifications[rr_set.domain.name]
deletions = self._rr_set_deletions[rr_set.domain.name]
item = (rr_set.type, rr_set.subname)
match (created, deleted):
case (True, False): # created
additions.add(item)
# can fail with concurrent deletion request
assert item not in modifications
deletions.discard(item)
case (False, True): # deleted
if item in additions:
additions.remove(item)
modifications.discard(item)
# no change to deletions
else:
# item not in additions
modifications.discard(item)
deletions.add(item)
case (False, False): # modified
# we don't care if item was created or not
modifications.add(item)
assert item not in deletions
case _:
raise ValueError(
"An RR set cannot be created and deleted at the same time."
)
def _domain_updated(self, domain: Domain, created=False, deleted=False):
if not created and not deleted:
# NOTE that the name must not be changed by API contract with models, hence here no-op for pdns.
return
name = domain.name
additions = self._domain_additions
deletions = self._domain_deletions
if created and deleted:
raise ValueError(
"A domain set cannot be created and deleted at the same time."
)
if created:
if name in deletions:
deletions.remove(name)
else:
additions.add(name)
elif deleted:
if name in additions:
additions.remove(name)
else:
deletions.add(name)
# noinspection PyUnusedLocal
def _on_rr_post_save(
self, signal, sender, instance: RR, created, update_fields, raw, using, **kwargs
):
self._rr_set_updated(instance.rrset)
# noinspection PyUnusedLocal
def _on_rr_post_delete(self, signal, sender, instance: RR, using, **kwargs):
try:
self._rr_set_updated(instance.rrset)
except RRset.DoesNotExist:
pass
# noinspection PyUnusedLocal
def _on_rr_set_post_save(
self,
signal,
sender,
instance: RRset,
created,
update_fields,
raw,
using,
**kwargs,
):
self._rr_set_updated(instance, created=created)
# noinspection PyUnusedLocal
def _on_rr_set_post_delete(self, signal, sender, instance: RRset, using, **kwargs):
self._rr_set_updated(instance, deleted=True)
# noinspection PyUnusedLocal
def _on_domain_post_save(
self,
signal,
sender,
instance: Domain,
created,
update_fields,
raw,
using,
**kwargs,
):
self._domain_updated(instance, created=created)
# noinspection PyUnusedLocal
def _on_domain_post_delete(self, signal, sender, instance: Domain, using, **kwargs):
self._domain_updated(instance, deleted=True)
def __str__(self):
all_rr_sets = (
self._rr_set_additions.keys()
| self._rr_set_modifications.keys()
| self._rr_set_deletions.keys()
)
all_domains = self._domain_additions | self._domain_deletions
return (
"<%s: %i added or deleted domains; %i added, modified or deleted RR sets>"
% (self.__class__.__name__, len(all_domains), len(all_rr_sets))
)
|
{
"content_hash": "98384b4e98186449bed0c80dd84fd5f1",
"timestamp": "",
"source": "github",
"line_count": 495,
"max_line_length": 127,
"avg_line_length": 35.96767676767677,
"alnum_prop": 0.5221860256122219,
"repo_name": "desec-io/desec-stack",
"id": "9f5dc095840184b151d1ae1897f2b6aeed6e79ae",
"size": "17804",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "api/desecapi/pdns_change_tracker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "5786"
},
{
"name": "HTML",
"bytes": "3777"
},
{
"name": "JavaScript",
"bytes": "22126"
},
{
"name": "Python",
"bytes": "716037"
},
{
"name": "Shell",
"bytes": "10425"
},
{
"name": "Vue",
"bytes": "234220"
}
],
"symlink_target": ""
}
|
class OAuthToolkitError(Exception):
"""
Base class for exceptions
"""
def __init__(self, error=None, redirect_uri=None, *args, **kwargs):
super(OAuthToolkitError, self).__init__(*args, **kwargs)
self.oauthlib_error = error
if redirect_uri:
self.oauthlib_error.redirect_uri = redirect_uri
class FatalClientError(OAuthToolkitError):
"""
Class for critical errors
"""
pass
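# Usage sketch (assumes oauthlib is installed; the error class name follows
# oauthlib's documented API and is an assumption here, not part of this module):
#
#   from oauthlib.oauth2.rfc6749 import errors
#   raise FatalClientError(error=errors.InvalidClientError(),
#                          redirect_uri='https://client.example/cb')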
|
{
"content_hash": "a7ac83ebc2ffd73905e5b52ee547af89",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 71,
"avg_line_length": 25.88235294117647,
"alnum_prop": 0.6272727272727273,
"repo_name": "Sunnepah/iupds-appscale",
"id": "0546cc009a2650c85b9b7ee1ee017a8b67f905d9",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iupdsmanager/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "API Blueprint",
"bytes": "14969"
},
{
"name": "CSS",
"bytes": "254897"
},
{
"name": "HTML",
"bytes": "41274"
},
{
"name": "JavaScript",
"bytes": "360163"
},
{
"name": "Python",
"bytes": "141661"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Field.merge'
db.alter_column(u'forms_field', 'merge', self.gf('django.db.models.fields.CharField')(max_length=100))
def backwards(self, orm):
# Changing field 'Field.merge'
db.alter_column(u'forms_field', 'merge', self.gf('django.db.models.fields.PositiveSmallIntegerField')())
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'forms.field': {
'Meta': {'ordering': "(u'order',)", 'object_name': 'Field'},
'choices': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'condition': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'default': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'dependency': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'blank': 'True'}),
'field_type': ('django.db.models.fields.IntegerField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'fields'", 'to': u"orm['forms.Form']"}),
'help_text': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'merge': ('django.db.models.fields.CharField', [], {'default': "u'0'", 'max_length': '100', 'blank': 'True'}),
'meta': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'placeholder_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': "u''", 'max_length': '100', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'forms.fieldentry': {
'Meta': {'object_name': 'FieldEntry'},
'entry': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'fields'", 'to': u"orm['forms.FormEntry']"}),
'field_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True'})
},
u'forms.form': {
'Meta': {'object_name': 'Form'},
'button_text': ('django.db.models.fields.CharField', [], {'default': "u'Submit'", 'max_length': '50'}),
'email_copies': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'email_from': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_message': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email_subject': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'redirect_url': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'send_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'related_name': "u'forms_form_forms'", 'symmetrical': 'False', 'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'forms.formentry': {
'Meta': {'object_name': 'FormEntry'},
'entry_time': ('django.db.models.fields.DateTimeField', [], {}),
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'entries'", 'to': u"orm['forms.Form']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['forms']
|
{
"content_hash": "7bdb462a6cfc952e946d33bc771d58b1",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 195,
"avg_line_length": 75.84745762711864,
"alnum_prop": 0.548268156424581,
"repo_name": "Afnarel/django-forms-builder",
"id": "a7390083784eefd65df8db231d5c5c80e339ad63",
"size": "8974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forms_builder/forms/south_migrations/0012_auto__chg_field_field_merge.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "9570"
},
{
"name": "Python",
"bytes": "190616"
}
],
"symlink_target": ""
}
|
import os.path
import yaml
def get_ssh_pubkey():
path = os.path.expanduser('~/.ssh/id_rsa.pub')
with file(path, 'rb') as f:
return f.readline().rstrip('\n')
def gen_meta(
name,
extra_meta,
):
meta_data = {
'instance-id': name,
'local-hostname': name,
'public-keys': [],
}
ssh_pubkey = get_ssh_pubkey()
meta_data['public-keys'].append(ssh_pubkey)
for path in extra_meta:
with file(path) as f:
extra_meta_data = yaml.safe_load(f)
if extra_meta_data is not None:
meta_data.update(extra_meta_data)
return meta_data
def write_meta(meta_data, fp):
yaml.safe_dump(
stream=fp,
data=meta_data,
default_flow_style=False,
)
fp.flush()
def gen_user(
name,
extra_user,
):
user_data = [
]
for path in extra_user:
with file(path) as f:
if f.readline() == '#cloud-config-archive\n':
# merge it into ours
extra_user_data = yaml.safe_load(f)
if extra_user_data is not None:
user_data.extend(extra_user_data)
else:
# some other format; slap it in as a single string
f.seek(0)
extra_user_data = f.read()
user_data.append(extra_user_data)
return user_data
def write_user(user_data, fp):
fp.write('#cloud-config-archive\n')
yaml.safe_dump(
stream=fp,
data=user_data,
default_flow_style=False,
)
fp.flush()
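# Usage sketch (hypothetical file paths), combining the helpers above to emit
# cloud-init seed files; a sketch only, not part of the module:
#
#   meta = gen_meta('vm0', extra_meta=[])
#   with open('meta-data', 'w') as fp:
#       write_meta(meta, fp)
#   user = gen_user('vm0', extra_user=[])
#   with open('user-data', 'w') as fp:
#       write_user(user, fp)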
|
{
"content_hash": "c7a8372cf39cb8c89bec5b839cde82ae",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 66,
"avg_line_length": 22.619718309859156,
"alnum_prop": 0.523038605230386,
"repo_name": "tv42/downburst",
"id": "c34b38de8f076ee2d50624f279751aff7dea1328",
"size": "1606",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "downburst/meta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33286"
},
{
"name": "Shell",
"bytes": "881"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
CHECKOUT_TYPE_SELECT = 0
CHECKOUT_TYPE_ANON = 1
CHECKOUT_TYPE_AUTH = 2
CHECKOUT_TYPES = (
(CHECKOUT_TYPE_SELECT, _(u"Anonymous and Authenticated")),
(CHECKOUT_TYPE_ANON, _(u"Anonymous only")),
(CHECKOUT_TYPE_AUTH, _(u"Authenticated only")),
)
SHIPPING_PREFIX = "shipping"
INVOICE_PREFIX = "invoice"
ONE_PAGE_CHECKOUT_FORM = getattr(settings, 'LFS_ONE_PAGE_CHECKOUT_FORM', 'lfs.checkout.forms.OnePageCheckoutForm')
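# Usage sketch: a project can swap in its own checkout form class from its
# settings.py, e.g. (hypothetical dotted path):
#
#   LFS_ONE_PAGE_CHECKOUT_FORM = 'myshop.forms.CustomOnePageCheckoutForm'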
|
{
"content_hash": "0850c69a3783cec8e5960413338b222e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 114,
"avg_line_length": 34.46666666666667,
"alnum_prop": 0.7369439071566731,
"repo_name": "diefenbach/django-lfs",
"id": "68014113e013dde19d9e79b06bdeb09d1d5b49a8",
"size": "517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lfs/checkout/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "96584"
},
{
"name": "HTML",
"bytes": "616573"
},
{
"name": "JavaScript",
"bytes": "591609"
},
{
"name": "Python",
"bytes": "1425991"
}
],
"symlink_target": ""
}
|
import codecs
import json
import pkg_resources
import pytest
from base64 import b64encode
from jsonschema_serialize_fork import Draft4Validator
from pyramid.compat import ascii_native_
from snovault import TYPES
from urllib.parse import urlparse
from .datafixtures import ORDER
pytestmark = [pytest.mark.setone, pytest.mark.working, pytest.mark.schema]
def _type_length():
# Not a fixture as we need to parameterize tests on this
utf8 = codecs.getreader("utf-8")
type_length_dict = {}
for name in ORDER:
try:
utf8_stream = utf8(pkg_resources.resource_stream('encoded', 'tests/data/workbook-inserts/%s.json' % name))
type_length_dict[name] = len(json.load(utf8_stream))
except Exception:
type_length_dict[name] = 0
return type_length_dict
TYPE_LENGTH = _type_length()
INDEX_DATA_TYPES = ['file_fastq', 'workflow_run_awsem', 'biosample', 'experiment_set']
PUBLIC_COLLECTIONS = [
'source',
'platform',
'treatment',
'lab',
'award',
'target',
'organism',
]
def test_home(anonhtmltestapp):
res = anonhtmltestapp.get('/', status=200)
assert res.body.startswith(b'<!DOCTYPE html>')
def test_home_json(testapp):
res = testapp.get('/', status=200)
assert res.json['@type']
def test_home_app_version(testapp):
res = testapp.get('/', status=200)
assert 'app_version' in res.json
def test_vary_html(anonhtmltestapp):
res = anonhtmltestapp.get('/', status=200)
assert res.vary is not None
assert 'Accept' in res.vary
def test_vary_json(anontestapp):
res = anontestapp.get('/', status=200)
assert res.vary is not None
assert 'Accept' in res.vary
def test_get_health_page(testapp):
"""
Tests that we can get the health page and various fields we expect are there
"""
res = testapp.get('/health', status=200).json
assert 'namespace' in res
assert 'blob_bucket' in res
assert 'elasticsearch' in res
@pytest.mark.parametrize('item_type', [k for k in TYPE_LENGTH if k != 'user'])
def test_collections_anon(anontestapp, item_type):
res = anontestapp.get('/' + item_type).follow(status=200)
assert '@graph' in res.json
@pytest.mark.parametrize('item_type', [k for k in TYPE_LENGTH if k != 'user'])
def test_html_collections_anon(anonhtmltestapp, item_type):
res = anonhtmltestapp.get('/' + item_type).follow(status=200)
assert res.body.startswith(b'<!DOCTYPE html>')
@pytest.mark.parametrize('item_type', TYPE_LENGTH)
def test_html_collections(htmltestapp, item_type):
res = htmltestapp.get('/' + item_type).follow(status=200)
assert res.body.startswith(b'<!DOCTYPE html>')
@pytest.mark.slow
@pytest.mark.parametrize('item_type', [k for k in TYPE_LENGTH if k != 'user'])
def test_html_server_pages(item_type, wsgi_app):
res = wsgi_app.get(
'/%s?limit=1' % item_type,
headers={'Accept': 'application/json'},
).follow(
status=200,
headers={'Accept': 'application/json'},
)
for item in res.json['@graph']:
res = wsgi_app.get(item['@id'], status=200)
assert res.body.startswith(b'<!DOCTYPE html>')
assert b'Internal Server Error' not in res.body
@pytest.mark.parametrize('item_type', TYPE_LENGTH)
def test_json(testapp, item_type):
res = testapp.get('/' + item_type).follow(status=200)
assert res.json['@type']
def test_json_basic_auth(anonhtmltestapp):
url = '/'
value = "Authorization: Basic %s" % ascii_native_(b64encode(b'nobody:pass'))
res = anonhtmltestapp.get(url, headers={'Authorization': value}, status=401)
assert res.content_type == 'application/json'
def _test_antibody_approval_creation(testapp):
new_antibody = {'foo': 'bar'}
res = testapp.post_json('/antibodies/', new_antibody, status=201)
assert res.location
assert '/profiles/result' in res.json['@type']['profile']
assert res.json['@graph'] == [{'href': urlparse(res.location).path}]
res = testapp.get(res.location, status=200)
assert '/profiles/antibody_approval' in res.json['@type']
data = res.json
for key in new_antibody:
assert data[key] == new_antibody[key]
res = testapp.get('/antibodies/', status=200)
assert len(res.json['@graph']) == 1
def test_load_sample_data(
analysis_step,
award,
human_biosample,
construct,
document,
experiment,
file,
lab,
organism,
publication,
publication_tracking,
software,
human_biosource,
submitter,
workflow_mapping,
workflow_run_sbg,
workflow_run_awsem,
):
assert True, 'Fixtures have loaded sample data'
def test_abstract_collection(testapp, experiment):
# TODO: ASK_BEN how to get experiment to function as catch all
pass
# testapp.get('/experiment/{accession}'.format(**experiment))
# testapp.get('/expermient/{accession}'.format(**experiment))
def test_collection_post(testapp):
item = {
'name': 'human',
'scientific_name': 'Homo sapiens',
'taxon_id': '9606',
}
return testapp.post_json('/organism', item, status=201)
def test_collection_post_bad_json(testapp):
item = {'foo': 'bar'}
res = testapp.post_json('/organism', item, status=422)
assert res.json['errors']
def test_collection_post_malformed_json(testapp):
item = '{'
headers = {'Content-Type': 'application/json'}
res = testapp.post('/organism', item, status=400, headers=headers)
assert res.json['detail'].startswith('Expecting')
def test_collection_post_missing_content_type(testapp):
item = '{}'
testapp.post('/organism', item, status=415)
def test_collection_post_bad_auth(anontestapp):
value = "Authorization: Basic %s" % ascii_native_(b64encode(b'nobody:pass'))
anontestapp.post_json('/organism', {}, headers={'Authorization': value}, status=401)
def test_item_actions_filtered_by_permission(testapp, authenticated_testapp, human_biosource):
location = human_biosource['@id'] + '?frame=page'
res = testapp.get(location)
assert any(action for action in res.json.get('actions', []) if action['name'] == 'edit')
res = authenticated_testapp.get(location)
assert not any(action for action in res.json.get('actions', []) if action['name'] == 'edit')
def test_collection_put(testapp, execute_counter):
initial = {
"name": "human",
"scientific_name": "Homo sapiens",
"taxon_id": "9606",
}
item_url = testapp.post_json('/organism', initial).location
with execute_counter.expect(1):
item = testapp.get(item_url + '?frame=object').json
for key in initial:
assert item[key] == initial[key]
update = {
'name': 'mouse',
'scientific_name': 'Mus musculus',
'taxon_id': '10090',
}
testapp.put_json(item_url, update, status=200)
res = testapp.get('/' + item['uuid'] + '?frame=object').follow().json
for key in update:
assert res[key] == update[key]
def test_post_duplicate_uuid(testapp, mouse):
item = {
'uuid': mouse['uuid'],
'name': 'human',
'scientific_name': 'Homo sapiens',
'taxon_id': '9606',
}
testapp.post_json('/organism', item, status=409)
def test_user_effective_principals(submitter, lab, anontestapp, execute_counter):
email = submitter['email']
with execute_counter.expect(1):
res = anontestapp.get('/@@testing-user',
extra_environ={'REMOTE_USER': str(email)})
assert sorted(res.json['effective_principals']) == [
'group.submitter',
'lab.%s' % lab['uuid'],
'remoteuser.%s' % email,
'submits_for.%s' % lab['uuid'],
'system.Authenticated',
'system.Everyone',
'userid.%s' % submitter['uuid'],
'viewing_group.4DN',
]
def test_jsonld_context(testapp):
res = testapp.get('/terms/')
assert res.json
def test_jsonld_term(testapp):
res = testapp.get('/terms/submitted_by')
assert res.json
@pytest.mark.parametrize('item_type', TYPE_LENGTH)
def test_profiles(testapp, item_type):
# this will only be non-abstract types
res = testapp.get('/profiles/%s.json' % item_type).maybe_follow(status=200)
errors = Draft4Validator.check_schema(res.json)
assert not errors
# added from snovault.schema_views._annotated_schema
assert 'rdfs:seeAlso' in res.json
assert 'rdfs:subClassOf' in res.json
assert 'children' in res.json
assert res.json['isAbstract'] is False
def test_profiles_all(testapp, registry):
res = testapp.get('/profiles/').maybe_follow(status=200)
# make sure all types are present, including abstract types
for ti in registry[TYPES].by_item_type.values():
assert ti.name in res.json
assert res.json[ti.name]['isAbstract'] is False
for ti in registry[TYPES].by_abstract_type.values():
assert ti.name in res.json
assert res.json[ti.name]['isAbstract'] is True
def test_bad_frame(testapp, human):
res = testapp.get(human['@id'] + '?frame=bad', status=404)
assert res.json['detail'] == '?frame=bad'
|
{
"content_hash": "1bd52535f2c1702f25b383391889f0c2",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 118,
"avg_line_length": 30.18360655737705,
"alnum_prop": 0.6441451227460352,
"repo_name": "4dn-dcic/fourfront",
"id": "776cd1cfbf4be5b938948bc8e2770fb9d82f9f55",
"size": "9206",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/encoded/tests/test_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Common Workflow Language",
"bytes": "15818"
},
{
"name": "Dockerfile",
"bytes": "6312"
},
{
"name": "HTML",
"bytes": "11048"
},
{
"name": "JavaScript",
"bytes": "2106661"
},
{
"name": "Makefile",
"bytes": "9079"
},
{
"name": "PLpgSQL",
"bytes": "12067"
},
{
"name": "Python",
"bytes": "1758496"
},
{
"name": "SCSS",
"bytes": "224522"
},
{
"name": "Shell",
"bytes": "19014"
}
],
"symlink_target": ""
}
|
__author__ = 'arul'
import sys
import json
import time
from fabric.api import *
from fabric.contrib import files
import re
from fabric.tasks import Task
"""
Fabric tasks for MongoDB collection operations: list, count, backup, restore, and drop.
"""
env.use_ssh_config = True
class MongoTask(Task):
DB_NAME = MONGO_HOST = MONGO_USER = MONGO_PASSWORD = None
def __init__(self, func, *args, **kwargs):
super(MongoTask, self).__init__(*args, **kwargs)
self.func = func
def run(self, *args, **kwargs):
global DB_NAME, MONGO_HOST, MONGO_USER, MONGO_PASSWORD
if "dbname" in kwargs:
self.dbname = kwargs["dbname"]
DB_NAME = kwargs["dbname"]
if "mongo_host" in kwargs:
self.mongo_host = kwargs["mongo_host"]
MONGO_HOST = kwargs["mongo_host"]
if "mongo_user" in kwargs:
self.mongo_user = kwargs["mongo_user"]
MONGO_USER = kwargs["mongo_user"]
if "mongo_password" in kwargs:
self.mongo_password = kwargs["mongo_password"]
MONGO_PASSWORD = kwargs["mongo_password"]
return self.func(*args, **kwargs)
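# Note: fabric wraps each @task(task_class=MongoTask) function below in a
# MongoTask instance, so run() captures the dbname/mongo_* keyword arguments
# into module globals before the wrapped function executes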
def __list_collection__(dbname):
"""
List all the mongo collections from the given database.
:param dbname:
:return:
"""
coll_str = run("""mongo %s --eval "printjson(db.getCollectionNames())" --quiet""" % dbname)
if coll_str:
collections = json.loads(coll_str)
# remove system.* collections
collections = [name for name in collections if not re.search("system.*", name)]
return collections
return None
def __choose_collections__(dbname):
collections = __list_collection__(dbname)
if not collections:
print "No collections in DB: %s" % dbname
return None
collections_dict = dict()
i = 1
for collection in collections:
collections_dict[i] = collection
i += 1
print "Collections : "
for key in sorted(collections_dict.keys()):
print "\t %s. %s" % (key, collections_dict[key])
collection_nos = prompt("Which collections do you want to select? e.g. 1-3,6-8,10", validate=r'^[0-9,-]+$')
rangewithend = lambda start, end: range(start, end+1)
# 1-3,6-8,10
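# e.g. "1-3,6-8,10" expands to [1, 2, 3, 6, 7, 8, 10] (duplicates removed;
# note the set() pass does not guarantee order)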
if collection_nos:
collection_ranges = collection_nos.split(',')
extracted_range = list()
for r in collection_ranges:
rl = [int(i) for i in r.split('-')]
rl.sort()
if len(rl) > 1:
extracted_range = extracted_range + rangewithend(rl[0], rl[1])
else:
extracted_range = extracted_range + rl
extracted_range = list(set(extracted_range))
selected_collections = list()
for no in extracted_range:
selected_collections.append(collections_dict[no])
print "Selected Collections : "
print "\t %s" % selected_collections
yesorno = prompt("Do you want to continue? y|n", validate=r'y|n')
if yesorno == "n":
return None
return selected_collections
@task(alias="list", task_class=MongoTask)
@with_settings(hide('stdout'), warn_only=True)
def _list(dbname=None, mongo_host=None, mongo_user=None, mongo_password=None):
collections = __list_collection__(dbname)
if not collections:
print "No collections in DB: %s" % DB_NAME
return None
print "--"*20
print "%-20s" % "Collection Name"
print "--"*20
for name in collections:
print "%-20s" % name
print "--"*20
@task(alias="count", task_class=MongoTask)
@with_settings(hide('stdout'), warn_only=True)
def _count(dbname=None, mongo_host=None, mongo_user=None, mongo_password=None):
collections = __list_collection__(dbname)
if not collections:
print "No collections in DB: %s" % DB_NAME
return None
collections_count = list()
for name in collections:
no_of_docs = run("""mongo %s --eval "db.%s.count()" --quiet""" % (dbname, name))
collections_count.append((name, no_of_docs))
print "--"*40
print "%-40s %10s" % ("Collection Name", "Count")
print "--"*40
for count_tuple in collections_count:
print "%-40s %10d" % (count_tuple[0], int(count_tuple[1]))
print "--"*40
@task(default=True, task_class=MongoTask)
@with_settings(hide('stdout'), warn_only=True)
def backup(dbname=None, mongo_host=None, mongo_user=None, mongo_password=None):
_backup(dbname)
def _backup(dbname):
collection_names = __choose_collections__(dbname)
if collection_names is None:
return
timestamp = time.time()
backup_dir = "/opt/%s" % timestamp
backup_dir = prompt("Change backup directory?", default=backup_dir)
run("mkdir -p %s" % backup_dir)
for name in collection_names:
print "Backup %s" % name
run("mongodump --collection %s --db %s --out %s" % (name, dbname, backup_dir))
return backup_dir, collection_names
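# e.g. "fab -H dbhost backup:dbname=mydb" dumps the chosen collections under
# /opt/<timestamp> unless another directory is supplied at the prompt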
@task(task_class=MongoTask)
@with_settings(hide('stdout'), warn_only=True)
def backuprestore(dbname=None, mongo_host=None, mongo_user=None, mongo_password=None, target_collection=None):
if target_collection is None:
print "Please give `target_collection` argument."
return
backup_dir, collection_names = _backup(dbname)
for name in collection_names:
dump_file_path = "%s/%s/%s.bson" % (backup_dir, dbname, name)
run("mongorestore -d %s -c %s %s" % (dbname, target_collection, dump_file_path))
@task(task_class=MongoTask)
@with_settings(hide('stdout'), warn_only=True)
def drop(dbname=None, mongo_host=None, mongo_user=None, mongo_password=None):
collection_names = __choose_collections__(dbname)
if collection_names is None:
return
yesorno = prompt("Are you sure want to drop above collections. y|n?", validate=r'y|n')
if yesorno == "n":
return None
for name in collection_names:
run("""mongo %s --eval "printjson(db.%s.drop())" --quiet""" % (dbname, name))
if __name__ == '__main__':
print "Usage: fab -H hostname -f %s backup:dbname=database-name" % sys.argv[0]
print "Usage: fab -H hostname -f %s backuprestore:dbname=database-name,target_collection=collection-name" % sys.argv[0]
print "Usage: fab -H hostname -f %s drop:dbname=database-name" % sys.argv[0]
|
{
"content_hash": "898aac643e34ac4523542fb95e25fbe7",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 123,
"avg_line_length": 30.688995215311003,
"alnum_prop": 0.6177112566261304,
"repo_name": "arulrajnet/operationalscripts",
"id": "116c28c922936dccea6fd4eb40d5b018d389009a",
"size": "6439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/mongo_collection_operations.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12514"
},
{
"name": "Shell",
"bytes": "14175"
}
],
"symlink_target": ""
}
|
from byteio import ByteReader
import os
class ReferencePoint(object):
def __init__(self, reader):
self.name = reader.read_string().decode('utf-8')
self.x = reader.read_int32()
self.y = reader.read_int32()
self.z = reader.read_int32()
class Palette(object):
def __init__(self, reader, has_names=True):
self.palette = []
for _ in range(256):
r = reader.read_uint8()
g = reader.read_uint8()
b = reader.read_uint8()
self.palette.append((r, g, b))
if not has_names:
self.names = None
return
self.names = []
for _ in range(256):
self.names.append(reader.read_string().decode('utf-8'))
def write(self, writer):
for (r, g, b) in self.palette:
writer.write_uint8(r)
writer.write_uint8(g)
writer.write_uint8(b)
if not self.names:
return
for name in self.names:
writer.write_string(name.encode('utf-8'))
PALETTE_FILE = os.path.join(os.path.dirname(__file__), '..', 'palette.dat')
def read_global_palette(filename=None):
if filename is None:
filename = PALETTE_FILE
with open(filename, 'rb') as f:
data = f.read()
reader = ByteReader(data)
return Palette(reader)
class VoxelModel(object):
def __init__(self, reader):
self.x_size = reader.read_uint32()
self.y_size = reader.read_uint32()
self.z_size = reader.read_uint32()
self.x_offset = reader.read_int32()
self.y_offset = reader.read_int32()
self.z_offset = reader.read_int32()
self.data = bytearray()
self.blocks = {}
self.palette = []
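# Voxels are read in x/y/z order; the value 255 marks an empty cell and is
# skipped, so self.blocks only indexes solid voxels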
for x in range(self.x_size):
for y in range(self.y_size):
for z in range(self.z_size):
v = reader.read_uint8()
self.data.append(v)
if v == 255:
continue
self.blocks[(x, y, z)] = v
self.palette = Palette(reader, False).palette
self.points = []
for _ in range(reader.read_uint8()):
self.points.append(ReferencePoint(reader))
def is_solid(self, x, y, z):
return self.blocks.get((x, y, z), None) is not None
|
{
"content_hash": "fb3cca1f87b366e166cf8da45734a66f",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 75,
"avg_line_length": 29.76923076923077,
"alnum_prop": 0.5357450473729544,
"repo_name": "matpow2/voxie",
"id": "aae6ee7805c33a060d86b1defbd6bf2740dc4f29",
"size": "3417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/voxmodel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2027834"
},
{
"name": "C#",
"bytes": "8373"
},
{
"name": "C++",
"bytes": "5880613"
},
{
"name": "CMake",
"bytes": "40882"
},
{
"name": "Lua",
"bytes": "2034"
},
{
"name": "Objective-C",
"bytes": "40098"
}
],
"symlink_target": ""
}
|
from unittest import TestCase
from intent.corpora.POSCorpus import process_wsj_file
from intent.utils.dicts import CountDict
__author__ = 'rgeorgi'
class parse_wsj_tests(TestCase):
def parse_test(self):
path = '/Users/rgeorgi/Documents/treebanks/LDC95T07/RAW/combined/wsj/00/wsj_0001.mrg'
tc = CountDict()
def count_tokens(tokens):
for token in tokens:
tc.add(token.label)
process_wsj_file(path, count_tokens)
# There should be 31 total tokens in this file.
self.assertEqual(31, tc.total())
self.assertEqual(tc['.'], 2)
|
{
"content_hash": "0aa1e53ac05c0c4829db22883c024599",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 93,
"avg_line_length": 24.72,
"alnum_prop": 0.6456310679611651,
"repo_name": "rgeorgi/intent",
"id": "01c91ced2e9e24e536ae7671aadda2a4f6c9b812",
"size": "618",
"binary": false,
"copies": "1",
"ref": "refs/heads/toward-0.4",
"path": "intent/tests/pos_parse_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "573977"
}
],
"symlink_target": ""
}
|
'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
_10sec = False
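# _10sec enables a quick smoke-test mode: one epoch on a tiny subset of the
# data, so the whole script finishes in roughly ten seconds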
batch_size = 128
num_classes = 10
epochs = 20
if _10sec:
epochs = 1
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
if _10sec:
x_train = x_train[:100]
y_train = y_train[:100]
x_test = x_test[:20]
y_test = y_test[:20]
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(784,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
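# evaluate() returns [loss, accuracy], matching the metrics configured above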
print('Test loss:', score[0])
print('Test accuracy:', score[1])
|
{
"content_hash": "790a5f0799a0bfeb07f1051d1f2e36be",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 60,
"avg_line_length": 27.136363636363637,
"alnum_prop": 0.6683417085427136,
"repo_name": "guildai/guild",
"id": "c81a493908271f22e68af1316837d639efbd5c7c",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/get-started-use-guild/mnist_mlp_10sec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "416"
},
{
"name": "JavaScript",
"bytes": "29682"
},
{
"name": "Makefile",
"bytes": "2621"
},
{
"name": "Python",
"bytes": "736181"
},
{
"name": "Shell",
"bytes": "1074"
},
{
"name": "Vue",
"bytes": "48469"
}
],
"symlink_target": ""
}
|
import re
from unittest import TestCase
from redshift_sqlalchemy.dialect import CopyCommand
class TestCopyCommand(TestCase):
def test_basic_copy_case(self):
expected_result = re.sub(r'\s+', ' ',
"COPY schema1.t1 FROM 's3://mybucket/data/listing/' "
"CREDENTIALS 'aws_access_key_id=cookies;aws_secret_access_key=cookies' CSV "
"TRUNCATECOLUMNS EMPTYASNULL BLANKSASNULL DELIMITER ',' IGNOREHEADER 0 ;").strip()
copy = CopyCommand('schema1', 't1', 's3://mybucket/data/listing/', 'cookies', 'cookies')
copy_str = re.sub(r'\s+', ' ', str(copy)).strip()
self.assertEqual(expected_result, copy_str)
|
{
"content_hash": "57d617bf7c66e75c08f95a51d38cf480",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 116,
"avg_line_length": 48.86666666666667,
"alnum_prop": 0.5961800818553888,
"repo_name": "hearsaycorp/redshift_sqlalchemy",
"id": "ef212e174a890d25f4b0589605869cfe38159963",
"size": "733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_copy_command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28676"
}
],
"symlink_target": ""
}
|
from test_framework.mininode import *
from test_framework.test_framework import InfinitumTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
'''
SendHeadersTest -- test behavior of headers messages to announce blocks.
Setup:
- Two nodes, two p2p connections to node0. One p2p connection should only ever
receive inv's (omitted from testing description below, this is our control).
Second node is used for creating reorgs.
Part 1: No headers announcements before "sendheaders"
a. node mines a block [expect: inv]
send getdata for the block [expect: block]
b. node mines another block [expect: inv]
send getheaders and getdata [expect: headers, then block]
c. node mines another block [expect: inv]
peer mines a block, announces with header [expect: getdata]
d. node mines another block [expect: inv]
Part 2: After "sendheaders", headers announcements should generally work.
a. peer sends sendheaders [expect: no response]
peer sends getheaders with current tip [expect: no response]
b. node mines a block [expect: tip header]
c. for N in 1, ..., 10:
* for announce-type in {inv, header}
- peer mines N blocks, announces with announce-type
[ expect: getheaders/getdata or getdata, deliver block(s) ]
- node mines a block [ expect: 1 header ]
Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer.
- For response-type in {inv, getheaders}
* node mines a 7-block reorg [ expect: headers announcement of 8 blocks ]
* node mines an 8-block reorg [ expect: inv at tip ]
* peer responds with getblocks/getdata [expect: inv, blocks ]
* node mines another block [ expect: inv at tip, peer sends getdata, expect: block ]
* node mines another block at tip [ expect: inv ]
* peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers]
* peer requests block [ expect: block ]
* node mines another block at tip [ expect: inv, peer sends getdata, expect: block ]
* peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block]
* node mines 1 block [expect: 1 header, peer responds with getdata]
Part 4: Test direct fetch behavior
a. Announce 2 old block headers.
Expect: no getdata requests.
b. Announce 3 new blocks via 1 headers message.
Expect: one getdata request for all 3 blocks.
(Send blocks.)
c. Announce 1 header that forks off the last two blocks.
Expect: no response.
d. Announce 1 more header that builds on that fork.
Expect: one getdata request for two blocks.
e. Announce 16 more headers that build on that fork.
Expect: getdata request for 14 more blocks.
f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
a. Repeat 10 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
b. Then send 9 more headers that don't connect.
Expect: getheaders message each time.
c. Announce a header that does connect.
Expect: no response.
d. Announce 49 headers that don't connect.
Expect: getheaders message each time.
e. Announce one more that doesn't connect.
Expect: disconnect.
'''
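# Illustrative sketch (not executed here) of the opt-in flow described above,
# using the primitives defined below:
#
#   test_node.send_message(msg_sendheaders())  # opt in to headers announcements
#   tip = self.mine_blocks(1)
#   assert test_node.check_last_announcement(headers=[tip])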
class BaseNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.last_inv = None
self.last_headers = None
self.last_block = None
self.ping_counter = 1
self.last_pong = msg_pong(0)
self.last_getdata = None
self.sleep_time = 0.05
self.block_announced = False
self.last_getheaders = None
self.disconnected = False
def clear_last_announcement(self):
with mininode_lock:
self.block_announced = False
self.last_inv = None
self.last_headers = None
def add_connection(self, conn):
self.connection = conn
# Request data for a list of block hashes
def get_data(self, block_hashes):
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
self.connection.send_message(msg)
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
self.connection.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
self.connection.send_message(msg)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_inv(self, conn, message):
self.last_inv = message
self.block_announced = True
def on_headers(self, conn, message):
self.last_headers = message
self.block_announced = True
def on_block(self, conn, message):
self.last_block = message.block
self.last_block.calc_sha256()
def on_getdata(self, conn, message):
self.last_getdata = message
def on_pong(self, conn, message):
self.last_pong = message
def on_getheaders(self, conn, message):
self.last_getheaders = message
def on_close(self, conn):
self.disconnected = True
# Test whether the last announcement we received had the
# right header or the right inv
# inv and headers should be lists of block hashes
def check_last_announcement(self, headers=None, inv=None):
expect_headers = headers if headers != None else []
expect_inv = inv if inv != None else []
test_function = lambda: self.block_announced
self.sync(test_function)
with mininode_lock:
self.block_announced = False
success = True
compare_inv = []
if self.last_inv != None:
compare_inv = [x.hash for x in self.last_inv.inv]
if compare_inv != expect_inv:
success = False
hash_headers = []
if self.last_headers != None:
# treat headers as a list of block hashes
hash_headers = [ x.sha256 for x in self.last_headers.headers ]
if hash_headers != expect_headers:
success = False
self.last_inv = None
self.last_headers = None
return success
# Syncing helpers
def sync(self, test_function, timeout=60):
while timeout > 0:
with mininode_lock:
if test_function():
return
time.sleep(self.sleep_time)
timeout -= self.sleep_time
raise AssertionError("Sync failed to complete")
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_pong.nonce == self.ping_counter
self.sync(test_function, timeout)
self.ping_counter += 1
return
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
self.sync(test_function, timeout)
return
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_getheaders != None
self.sync(test_function, timeout)
return
def wait_for_getdata(self, hash_list, timeout=60):
if hash_list == []:
return
test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
self.sync(test_function, timeout)
return
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.disconnected
self.sync(test_function, timeout)
return
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
self.send_message(headers_message)
def send_getblocks(self, locator):
getblocks_message = msg_getblocks()
getblocks_message.locator.vHave = locator
self.send_message(getblocks_message)
# InvNode: This peer should only ever receive inv's, because it doesn't ever send a
# "sendheaders" message.
class InvNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
# TestNode: This peer is the one we use for most of the testing.
class TestNode(BaseNode):
def __init__(self):
BaseNode.__init__(self)
class SendHeadersTest(InfinitumTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.nodes = []
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [["-debug", "-logtimemicros=1"]]*2)
connect_nodes(self.nodes[0], 1)
# mine count blocks and return the new tip
def mine_blocks(self, count):
# Clear out last block announcement from each p2p listener
[ x.clear_last_announcement() for x in self.p2p_connections ]
self.nodes[0].generate(count)
return int(self.nodes[0].getbestblockhash(), 16)
# mine a reorg that invalidates length blocks (replacing them with
# length+1 blocks).
# Note: we clear the state of our p2p connections after the
# to-be-reorged-out blocks are mined, so that we don't break later tests.
# return the list of block hashes newly mined
def mine_reorg(self, length):
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
sync_blocks(self.nodes, wait=0.1)
[x.clear_last_announcement() for x in self.p2p_connections]
tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
self.nodes[1].invalidateblock(hash_to_invalidate)
all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain
sync_blocks(self.nodes, wait=0.1)
return [int(x, 16) for x in all_hashes]
def run_test(self):
# Setup the p2p connections and start up the network thread.
inv_node = InvNode()
test_node = TestNode()
self.p2p_connections = [inv_node, test_node]
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node))
# Set nServices to 0 for test_node, so no block download will occur outside of
# direct fetching
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0))
inv_node.add_connection(connections[0])
test_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
inv_node.wait_for_verack()
test_node.wait_for_verack()
tip = int(self.nodes[0].getbestblockhash(), 16)
# PART 1
# 1. Mine a block; expect inv announcements each time
print("Part 1: headers don't start before sendheaders message...")
for i in range(4):
old_tip = tip
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
# Try a few different responses; none should affect next announcement
if i == 0:
# first request the block
test_node.get_data([tip])
test_node.wait_for_block(tip, timeout=5)
elif i == 1:
# next try requesting header and block
test_node.get_headers(locator=[old_tip], hashstop=tip)
test_node.get_data([tip])
test_node.wait_for_block(tip)
test_node.clear_last_announcement() # since we requested headers...
elif i == 2:
# this time announce own block via headers
height = self.nodes[0].getblockcount()
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
new_block = create_block(tip, create_coinbase(height+1), block_time)
new_block.solve()
test_node.send_header_for_blocks([new_block])
test_node.wait_for_getdata([new_block.sha256], timeout=5)
test_node.send_message(msg_block(new_block))
test_node.sync_with_ping() # make sure this block is processed
inv_node.clear_last_announcement()
test_node.clear_last_announcement()
print("Part 1: success!")
print("Part 2: announce blocks with headers after sendheaders message...")
# PART 2
# 2. Send a sendheaders message and test that headers announcements
# commence and keep working.
test_node.send_message(msg_sendheaders())
prev_tip = int(self.nodes[0].getbestblockhash(), 16)
test_node.get_headers(locator=[prev_tip], hashstop=0)
test_node.sync_with_ping()
# Now that we've synced headers, headers announcements should work
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height = self.nodes[0].getblockcount()+1
block_time += 10 # Advance far enough ahead
for i in range(10):
# Mine i blocks, and alternate announcing either via
# inv (of tip) or via headers. After each, new blocks
# mined by the node should successfully be announced
# with block header, even though the blocks are never requested
for j in range(2):
blocks = []
for b in range(i+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
if j == 0:
# Announce via inv
test_node.send_block_inv(tip)
test_node.wait_for_getdata([tip], timeout=5)
# Test that duplicate inv's won't result in duplicate
# getdata requests, or duplicate headers announcements
inv_node.send_block_inv(tip)
# Should have received a getheaders as well!
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks[0:-1]], timeout=5)
[ inv_node.send_block_inv(x.sha256) for x in blocks[0:-1] ]
inv_node.sync_with_ping()
else:
# Announce via headers
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=5)
# Test that duplicate headers won't result in duplicate
# getdata requests (the check is further down)
inv_node.send_header_for_blocks(blocks)
inv_node.sync_with_ping()
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
inv_node.sync_with_ping()
# This block should not be announced to the inv node (since it also
# broadcast it)
assert_equal(inv_node.last_inv, None)
assert_equal(inv_node.last_headers, None)
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
height += 1
block_time += 1
print("Part 2: success!")
print("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...")
# PART 3. Headers announcements can stop after large reorg, and resume after
# getheaders or inv from peer.
for j in range(2):
# First try mining a reorg that can propagate with header announcement
new_block_hashes = self.mine_reorg(length=7)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True)
block_time += 8
# Mine a too-large reorg, which should be announced with a single inv
new_block_hashes = self.mine_reorg(length=8)
tip = new_block_hashes[-1]
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
block_time += 9
fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"]
fork_point = int(fork_point, 16)
# Use getblocks/getdata
test_node.send_getblocks(locator = [fork_point])
assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True)
test_node.get_data(new_block_hashes)
test_node.wait_for_block(new_block_hashes[-1])
for i in range(3):
# Mine another block, still should get only an inv
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(inv=[tip]), True)
if i == 0:
# Just get the data -- shouldn't cause headers announcements to resume
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 1:
# Send a getheaders message that shouldn't trigger headers announcements
# to resume (best header sent will be too old)
test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1])
test_node.get_data([tip])
test_node.wait_for_block(tip)
elif i == 2:
test_node.get_data([tip])
test_node.wait_for_block(tip)
# This time, try sending either a getheaders to trigger resumption
# of headers announcements, or mine a new block and inv it, also
# triggering resumption of headers announcements.
if j == 0:
test_node.get_headers(locator=[tip], hashstop=0)
test_node.sync_with_ping()
else:
test_node.send_block_inv(tip)
test_node.sync_with_ping()
# New blocks should now be announced with header
tip = self.mine_blocks(1)
assert_equal(inv_node.check_last_announcement(inv=[tip]), True)
assert_equal(test_node.check_last_announcement(headers=[tip]), True)
print("Part 3: success!")
print("Part 4: Testing direct fetch behavior...")
tip = self.mine_blocks(1)
height = self.nodes[0].getblockcount() + 1
last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time']
block_time = last_time + 1
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
for b in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
inv_node.send_message(msg_block(blocks[-1]))
inv_node.sync_with_ping() # Make sure blocks are processed
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
# should not have received any getdata messages
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# This time, direct fetch should work
blocks = []
for b in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=test_node.sleep_time)
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
# Now announce a header that forks the last two blocks
tip = blocks[0].sha256
height -= 1
blocks = []
# Create extra blocks for later
for b in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Announcing one block on fork should not trigger direct fetch
# (less work than tip)
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[0:1])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
# Announcing one more block on fork should trigger direct fetch for
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=test_node.sleep_time)
# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=test_node.sleep_time)
# Announcing 1 more header should not trigger any response
test_node.last_getdata = None
test_node.send_header_for_blocks(blocks[18:19])
test_node.sync_with_ping()
with mininode_lock:
assert_equal(test_node.last_getdata, None)
print("Part 4: success!")
# Now deliver all those blocks we announced.
[ test_node.send_message(msg_block(x)) for x in blocks ]
print("Part 5: Testing handling of unconnecting headers")
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
for i in range(10):
test_node.last_getdata = None
blocks = []
# Create two more blocks.
for j in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
# Send the header of the second block -> this won't connect.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[1]])
test_node.wait_for_getheaders(timeout=1)
test_node.send_header_for_blocks(blocks)
test_node.wait_for_getdata([x.sha256 for x in blocks])
[ test_node.send_message(msg_block(x)) for x in blocks ]
test_node.sync_with_ping()
assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256)
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
for j in range(MAX_UNCONNECTING_HEADERS+1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
for i in range(1, MAX_UNCONNECTING_HEADERS):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders(timeout=1)
# Next header will connect, should re-set our count:
test_node.send_header_for_blocks([blocks[0]])
# Remove the first two entries (blocks[1] would connect):
blocks = blocks[2:]
# Now try to see how many unconnecting headers we can send
# before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS
for i in range(5*MAX_UNCONNECTING_HEADERS - 1):
# Send a header that doesn't connect, check that we get a getheaders.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[i%len(blocks)]])
test_node.wait_for_getheaders(timeout=1)
# Eventually this stops working.
with mininode_lock:
test_node.last_getheaders = None
test_node.send_header_for_blocks([blocks[-1]])
# Should get disconnected
test_node.wait_for_disconnect()
with mininode_lock:
test_node.last_getheaders = True
print("Part 5: success!")
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert_equal(inv_node.last_getdata, None)
if __name__ == '__main__':
SendHeadersTest().main()
|
{
"content_hash": "ea639fc097946754844e8a73e940929d",
"timestamp": "",
"source": "github",
"line_count": 618,
"max_line_length": 116,
"avg_line_length": 41.82200647249191,
"alnum_prop": 0.6045809796486884,
"repo_name": "fcecin/infinitum",
"id": "3e3935eccf1bab005b99c83328b4ed52d867f4be",
"size": "26061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/sendheaders.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "679983"
},
{
"name": "C++",
"bytes": "4544909"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "3870"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2102"
},
{
"name": "M4",
"bytes": "175841"
},
{
"name": "Makefile",
"bytes": "96055"
},
{
"name": "Objective-C",
"bytes": "3783"
},
{
"name": "Objective-C++",
"bytes": "7244"
},
{
"name": "Protocol Buffer",
"bytes": "2316"
},
{
"name": "Python",
"bytes": "882788"
},
{
"name": "QMake",
"bytes": "2022"
},
{
"name": "Shell",
"bytes": "34286"
}
],
"symlink_target": ""
}
|
from tests.lib import pyversion
from pip.vcs.bazaar import Bazaar
if pyversion >= '3':
VERBOSE_FALSE = False
else:
VERBOSE_FALSE = 0
def test_bazaar_simple_urls():
"""
Test bzr url support.
SSH and launchpad have special handling.
"""
http_bzr_repo = Bazaar(
url='bzr+http://bzr.myproject.org/MyProject/trunk/#egg=MyProject'
)
https_bzr_repo = Bazaar(
url='bzr+https://bzr.myproject.org/MyProject/trunk/#egg=MyProject'
)
ssh_bzr_repo = Bazaar(
url='bzr+ssh://bzr.myproject.org/MyProject/trunk/#egg=MyProject'
)
ftp_bzr_repo = Bazaar(
url='bzr+ftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject'
)
sftp_bzr_repo = Bazaar(
url='bzr+sftp://bzr.myproject.org/MyProject/trunk/#egg=MyProject'
)
launchpad_bzr_repo = Bazaar(
url='bzr+lp:MyLaunchpadProject#egg=MyLaunchpadProject'
)
assert http_bzr_repo.get_url_rev() == (
'http://bzr.myproject.org/MyProject/trunk/', None,
)
assert https_bzr_repo.get_url_rev() == (
'https://bzr.myproject.org/MyProject/trunk/', None,
)
assert ssh_bzr_repo.get_url_rev() == (
'bzr+ssh://bzr.myproject.org/MyProject/trunk/', None,
)
assert ftp_bzr_repo.get_url_rev() == (
'ftp://bzr.myproject.org/MyProject/trunk/', None,
)
assert sftp_bzr_repo.get_url_rev() == (
'sftp://bzr.myproject.org/MyProject/trunk/', None,
)
assert launchpad_bzr_repo.get_url_rev() == (
'lp:MyLaunchpadProject', None,
)
|
{
"content_hash": "c9805bbb3bf9b134cd817c9810a6eabf",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 74,
"avg_line_length": 29.71153846153846,
"alnum_prop": 0.6168284789644013,
"repo_name": "1stvamp/pip",
"id": "c28a23c65a1dac70def99c6fe4f11388e94fe5ca",
"size": "1545",
"binary": false,
"copies": "6",
"ref": "refs/heads/develop",
"path": "tests/unit/test_vcs.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
Classes for making VMware VI SOAP calls.
"""
import httplib
import urllib2
from oslo.config import cfg
import suds
from nova.i18n import _
from nova import utils
from nova.virt.vmwareapi import error_util
RESP_NOT_XML_ERROR = 'Response is "text/html", not "text/xml"'
CONN_ABORT_ERROR = 'Software caused connection abort'
ADDRESS_IN_USE_ERROR = 'Address already in use'
vmwareapi_wsdl_loc_opt = cfg.StrOpt('wsdl_location',
help='Optional VIM Service WSDL Location '
'e.g http://<server>/vimService.wsdl. '
'Optional over-ride to default location for bug work-arounds')
CONF = cfg.CONF
CONF.register_opt(vmwareapi_wsdl_loc_opt, 'vmware')
def get_moref(value, type):
"""Get managed object reference."""
moref = suds.sudsobject.Property(value)
moref._type = type
return moref
def object_to_dict(obj, list_depth=1):
"""Convert Suds object into serializable format.
The calling function can limit the amount of list entries that
are converted.
"""
d = {}
for k, v in suds.sudsobject.asdict(obj).iteritems():
if hasattr(v, '__keylist__'):
d[k] = object_to_dict(v, list_depth=list_depth)
elif isinstance(v, list):
d[k] = []
used = 0
for item in v:
used = used + 1
if used > list_depth:
break
if hasattr(item, '__keylist__'):
d[k].append(object_to_dict(item, list_depth=list_depth))
else:
d[k].append(item)
else:
d[k] = v
return d
class VIMMessagePlugin(suds.plugin.MessagePlugin):
def addAttributeForValue(self, node):
# suds does not handle AnyType properly.
# VI SDK requires type attribute to be set when AnyType is used
if node.name == 'value':
node.set('xsi:type', 'xsd:string')
def marshalled(self, context):
"""suds will send the specified soap envelope.
Provides the plugin with the opportunity to prune empty
nodes and fixup nodes before sending it to the server.
"""
# suds builds the entire request object based on the wsdl schema.
# VI SDK throws server errors if optional SOAP nodes are sent
# without values, e.g. <test/> as opposed to <test>test</test>
context.envelope.prune()
context.envelope.walk(self.addAttributeForValue)
class Vim:
"""The VIM Object."""
def __init__(self,
protocol="https",
host="localhost",
port=443):
"""Creates the necessary Communication interfaces and gets the
ServiceContent for initiating SOAP transactions.
protocol: http or https
host : ESX IPAddress or Hostname
port : port for connection
"""
if not suds:
raise Exception(_("Unable to import suds."))
self._protocol = protocol
self._host_name = host
self.wsdl_url = Vim.get_wsdl_url(protocol, host, port)
self.url = Vim.get_soap_url(protocol, host, port)
self.client = suds.client.Client(self.wsdl_url, location=self.url,
plugins=[VIMMessagePlugin()])
self._service_content = self.retrieve_service_content()
def retrieve_service_content(self):
return self.RetrieveServiceContent("ServiceInstance")
@staticmethod
def get_wsdl_url(protocol, host_name, port):
"""Allows override of the wsdl location, making this static
means we can test the logic outside of the constructor
without forcing the test environment to have multiple valid
wsdl locations to test against.
:param protocol: https or http
:param host_name: localhost or other server name
:param port: port for connection
:return: string to WSDL location for vSphere WS Management API
"""
# optional WSDL location over-ride for work-arounds
if CONF.vmware.wsdl_location:
return CONF.vmware.wsdl_location
# calculate default WSDL location if no override supplied
return Vim.get_soap_url(protocol, host_name, port) + "/vimService.wsdl"
@staticmethod
def get_soap_url(protocol, host_name, port):
"""Calculates the location of the SOAP services
for a particular server. Created as a static
method for testing.
:param protocol: https or http
:param host_name: localhost or other vSphere server name
:param port: port for connection
:return: the url to the active vSphere WS Management API
"""
if utils.is_valid_ipv6(host_name):
return '%s://[%s]:%d/sdk' % (protocol, host_name, port)
return '%s://%s:%d/sdk' % (protocol, host_name, port)
def get_service_content(self):
"""Gets the service content object."""
return self._service_content
def __getattr__(self, attr_name):
"""Makes the API calls and gets the result."""
def vim_request_handler(managed_object, **kwargs):
"""Builds the SOAP message and parses the response for fault
checking and other errors.
managed_object : Managed Object Reference or Managed
Object Name
**kwargs : Keyword arguments of the call
"""
# Dynamic handler for VI SDK Calls
try:
request_mo = self._request_managed_object_builder(
managed_object)
request = getattr(self.client.service, attr_name)
response = request(request_mo, **kwargs)
# To check for the faults that are part of the message body
# and not returned as Fault object response from the ESX
# SOAP server
if hasattr(error_util.FaultCheckers,
attr_name.lower() + "_fault_checker"):
fault_checker = getattr(error_util.FaultCheckers,
attr_name.lower() + "_fault_checker")
fault_checker(response)
return response
# Catch the VimFaultException that is raised by the fault
# check of the SOAP response
except error_util.VimFaultException:
raise
except suds.MethodNotFound:
raise
except suds.WebFault as excep:
doc = excep.document
fault_string = doc.childAtPath("/Envelope/Body/Fault/"
"faultstring").getText()
detail = doc.childAtPath("/Envelope/Body/Fault/detail")
fault_list = []
details = {}
if detail:
for fault in detail.getChildren():
fault_list.append(fault.get("type"))
for child in fault.getChildren():
details[child.name] = child.getText()
raise error_util.VimFaultException(fault_list, fault_string,
details)
except AttributeError as excep:
raise error_util.VimAttributeError(_("No such SOAP method "
"'%s' provided by VI SDK") % (attr_name), excep)
except (httplib.CannotSendRequest,
httplib.ResponseNotReady,
httplib.CannotSendHeader) as excep:
raise error_util.SessionOverLoadException(_("httplib "
"error in %s: ") % (attr_name), excep)
except (urllib2.URLError,
urllib2.HTTPError) as excep:
raise error_util.SessionConnectionException(_("urllib2 "
"error in %s: ") % (attr_name), excep)
except Exception as excep:
# Socket errors which need special handling for they
# might be caused by ESX API call overload
if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
str(excep).find(CONN_ABORT_ERROR) != -1):
raise error_util.SessionOverLoadException(_("Socket "
"error in %s: ") % (attr_name), excep)
# Type error that needs special handling for it might be
# caused by ESX host API call overload
elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
raise error_util.SessionOverLoadException(_("Type "
"error in %s: ") % (attr_name), excep)
else:
raise error_util.VimException(
_("Exception in %s ") % (attr_name), excep)
return vim_request_handler
def _request_managed_object_builder(self, managed_object):
"""Builds the request managed object."""
# Request Managed Object Builder
if isinstance(managed_object, str):
mo = suds.sudsobject.Property(managed_object)
mo._type = managed_object
else:
mo = managed_object
return mo
def __repr__(self):
return "VIM Object"
def __str__(self):
return "VIM Object"
|
{
"content_hash": "b49ec517ed6e64a95da3965d942c96cc",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 79,
"avg_line_length": 40.14529914529915,
"alnum_prop": 0.565147966787311,
"repo_name": "viggates/nova",
"id": "d4aa456c2b08ff085828c5a7dd58b377b08147f3",
"size": "10083",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/virt/vmwareapi/vim.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14822788"
},
{
"name": "Shell",
"bytes": "18352"
}
],
"symlink_target": ""
}
|
import unittest
from . import base
class JsonTestCase(base.BaseParserTestCase, unittest.TestCase):
extension = 'json'
|
{
"content_hash": "5db65a38217e8d71e6687bb123b72b33",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 63,
"avg_line_length": 17.857142857142858,
"alnum_prop": 0.768,
"repo_name": "deanmalmgren/textract",
"id": "290a67eefb7e5c5a01dd554be5a0be7508851861",
"size": "125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "521"
},
{
"name": "HTML",
"bytes": "491919"
},
{
"name": "Makefile",
"bytes": "1548"
},
{
"name": "PostScript",
"bytes": "968"
},
{
"name": "Python",
"bytes": "58239"
},
{
"name": "Rich Text Format",
"bytes": "78792"
},
{
"name": "Shell",
"bytes": "3383"
}
],
"symlink_target": ""
}
|
import testsupport
import StringIO, unittest
import sqlite3 as sqlite
class LogFileTemplate:
def write(self, s):
pass
class LogFile:
def __init__(self):
pass
def init_LogFile():
LogFile.write = LogFileTemplate.write
class CommandLoggingTests(unittest.TestCase, testsupport.TestSupport):
def tearDown(self):
try:
self.cnx.close()
self.removefile()
except AttributeError:
pass
except sqlite.InterfaceError:
pass
def CheckNoWrite(self):
init_LogFile()
del LogFile.write
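# LogFile now has no write() method at all, so connect() should reject it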
logger = LogFile()
try:
self.cnx = sqlite.connect(self.getfilename(),
command_logfile=logger)
self.fail("ValueError not raised")
except ValueError:
pass
def CheckWriteNotCallable(self):
logger = LogFile()
logger.write = 5
try:
self.cnx = sqlite.connect(self.getfilename(),
command_logfile=logger)
self.fail("ValueError not raised")
except ValueError:
pass
def CheckLoggingWorks(self):
logger = StringIO.StringIO()
expected_output = ";\n".join([
sqlite.main._BEGIN, "CREATE TABLE TEST(FOO INTEGER)",
"INSERT INTO TEST(FOO) VALUES (?)",
"ROLLBACK"]) + ";\n"
self.cnx = sqlite.connect(self.getfilename(),
command_logfile=logger)
cu = self.cnx.cursor()
cu.execute("CREATE TABLE TEST(FOO INTEGER)")
cu.execute("INSERT INTO TEST(FOO) VALUES (?)", (5,))
self.cnx.rollback()
logger.seek(0)
real_output = logger.read()
if expected_output != real_output:
self.fail("Logging didn't produce expected output.")
def suite():
command_logging_suite = unittest.makeSuite(CommandLoggingTests, "Check")
return command_logging_suite
def main():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
main()
|
{
"content_hash": "7e8a17f1e10e3e36e836224208b31ec8",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 76,
"avg_line_length": 25.936708860759495,
"alnum_prop": 0.582723279648609,
"repo_name": "fedora-conary/conary",
"id": "6ab82bccd9cc5628a5ed4d2e1f9c86c785d8ba4f",
"size": "2071",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "conary/pysqlite3/test/logging_tests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "481681"
},
{
"name": "C++",
"bytes": "8244"
},
{
"name": "CSS",
"bytes": "3920"
},
{
"name": "Erlang",
"bytes": "477"
},
{
"name": "Perl",
"bytes": "45629"
},
{
"name": "Python",
"bytes": "10586616"
},
{
"name": "Shell",
"bytes": "4657"
},
{
"name": "Standard ML",
"bytes": "2756"
}
],
"symlink_target": ""
}
|
"""
Pelix Utilities: Cached thread pool
:author: Thomas Calmant
:copyright: Copyright 2016, Thomas Calmant
:license: Apache License 2.0
:version: 0.6.4
..
Copyright 2016 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import logging
import threading
try:
# Python 3
# pylint: disable=F0401
import queue
except ImportError:
# Python 2
# pylint: disable=F0401
import Queue as queue
# Pelix
import pelix.utilities
# ------------------------------------------------------------------------------
# Documentation strings format
__docformat__ = "restructuredtext en"
# Module version
__version_info__ = (0, 6, 4)
__version__ = ".".join(str(x) for x in __version_info__)
# ------------------------------------------------------------------------------
class FutureResult(object):
"""
An object to wait for the result of a threaded execution
"""
def __init__(self, logger=None):
"""
Sets up the FutureResult object
:param logger: The Logger to use in case of error (optional)
"""
self._logger = logger or logging.getLogger(__name__)
self._done_event = pelix.utilities.EventData()
self.__callback = None
self.__extra = None
def __notify(self):
"""
Notify the given callback about the result of the execution
"""
if self.__callback is not None:
try:
self.__callback(self._done_event.data,
self._done_event.exception,
self.__extra)
except Exception as ex:
self._logger.exception("Error calling back method: %s", ex)
def set_callback(self, method, extra=None):
"""
Sets a callback method, called once the result has been computed or in
case of exception.
The callback method must have the following signature:
``callback(result, exception, extra)``.
:param method: The method to call back in the end of the execution
:param extra: Extra parameter to be given to the callback method
"""
self.__callback = method
self.__extra = extra
if self._done_event.is_set():
# The execution has already finished
self.__notify()
def execute(self, method, args, kwargs):
"""
Execute the given method and stores its result.
The result is considered "done" even if the method raises an exception
:param method: The method to execute
:param args: Method positional arguments
:param kwargs: Method keyword arguments
:raise Exception: The exception raised by the method
"""
# Normalize arguments
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
# Call the method
result = method(*args, **kwargs)
except Exception as ex:
# Something went wrong: propagate to the event and to the caller
self._done_event.raise_exception(ex)
raise
else:
# Store the result
self._done_event.set(result)
finally:
# In any case: notify the call back (if any)
self.__notify()
def done(self):
"""
Returns True if the job has finished, else False
"""
return self._done_event.is_set()
def result(self, timeout=None):
"""
Waits up to timeout for the result of the threaded job.
Returns immediately the result if the job has already been done.
:param timeout: The maximum time to wait for a result (in seconds)
:raise OSError: The timeout was reached before the job finished
:raise Exception: The exception encountered during the call, if any
"""
if self._done_event.wait(timeout):
return self._done_event.data
else:
raise OSError("Timeout raised")
# ------------------------------------------------------------------------------
class ThreadPool(object):
"""
Executes the tasks stored in a FIFO in a thread pool
"""
def __init__(self, max_threads, min_threads=1, queue_size=0, timeout=60,
logname=None):
"""
Sets up the thread pool.
Threads are kept alive 60 seconds (timeout argument).
:param max_threads: Maximum size of the thread pool
:param min_threads: Minimum size of the thread pool
:param queue_size: Size of the task queue (0 for infinite)
:param timeout: Queue timeout (in seconds, 60s by default)
:param logname: Name of the logger
:raise ValueError: Invalid number of threads
"""
# Validate parameters
try:
max_threads = int(max_threads)
if max_threads < 1:
raise ValueError("Pool size must be greater than 0")
except (TypeError, ValueError) as ex:
raise ValueError("Invalid pool size: {0}".format(ex))
try:
min_threads = int(min_threads)
if min_threads < 0:
min_threads = 0
elif min_threads > max_threads:
min_threads = max_threads
except (TypeError, ValueError) as ex:
raise ValueError("Invalid pool size: {0}".format(ex))
# The logger
self._logger = logging.getLogger(logname or __name__)
# The loop control event
self._done_event = threading.Event()
self._done_event.set()
# The task queue
try:
queue_size = int(queue_size)
except (TypeError, ValueError):
# Not a valid integer
queue_size = 0
self._queue = queue.Queue(queue_size)
self._timeout = timeout
self.__lock = threading.RLock()
# The thread pool
self._min_threads = min_threads
self._max_threads = max_threads
self._threads = []
# Thread count
self._thread_id = 0
# Current number of threads, active and alive
self.__nb_threads = 0
self.__nb_active_threads = 0
def start(self):
"""
Starts the thread pool. Does nothing if the pool is already started.
"""
if not self._done_event.is_set():
# Stop event not set: we're running
return
# Clear the stop event
self._done_event.clear()
# Compute the number of threads to start to handle pending tasks
nb_pending_tasks = self._queue.qsize()
if nb_pending_tasks > self._max_threads:
nb_threads = self._max_threads
elif nb_pending_tasks < self._min_threads:
nb_threads = self._min_threads
else:
nb_threads = nb_pending_tasks
# Create the threads
for _ in range(nb_threads):
self.__start_thread()
def __start_thread(self):
"""
Starts a new thread, if possible
"""
with self.__lock:
if self.__nb_threads >= self._max_threads:
# Can't create more threads
return False
if self._done_event.is_set():
# We're stopped: do nothing
return False
# Prepare thread and start it
name = "{0}-{1}".format(self._logger.name, self._thread_id)
self._thread_id += 1
thread = threading.Thread(target=self.__run, name=name)
thread.daemon = True
self._threads.append(thread)
thread.start()
return True
def stop(self):
"""
Stops the thread pool. Does nothing if the pool is already stopped.
"""
if self._done_event.is_set():
# Stop event set: we're stopped
return
# Set the stop event
self._done_event.set()
with self.__lock:
# Add something in the queue (to unlock the join())
try:
for _ in self._threads:
self._queue.put(self._done_event, True, self._timeout)
except queue.Full:
# There is already something in the queue
pass
# Copy the list of threads to wait for
threads = self._threads[:]
# Join threads outside the lock
for thread in threads:
while thread.is_alive():
# Wait 3 seconds
thread.join(3)
if thread.is_alive():
# Thread is still alive: something might be wrong
self._logger.warning("Thread %s is still alive...",
thread.name)
# Clear storage
del self._threads[:]
self.clear()
def enqueue(self, method, *args, **kwargs):
"""
Queues a task in the pool
:param method: Method to call
:return: A FutureResult object, to get the result of the task
:raise ValueError: Invalid method
:raise Full: The task queue is full
"""
if not hasattr(method, '__call__'):
raise ValueError("{0} has no __call__ member."
.format(method.__name__))
# Prepare the future result object
future = FutureResult(self._logger)
# Use a lock, as we might be "resetting" the queue
with self.__lock:
# Add the task to the queue
self._queue.put((method, args, kwargs, future), True,
self._timeout)
if self.__nb_active_threads == self.__nb_threads:
# All threads are taken: start a new one
self.__start_thread()
return future

    def clear(self):
"""
Empties the current queue content.
        Returns once the queue has been emptied and the tasks currently
        being executed have finished.
"""
with self.__lock:
# Empty the current queue
try:
while True:
self._queue.get_nowait()
self._queue.task_done()
except queue.Empty:
# Queue is now empty
pass
        # Wait for the tasks currently executed; do this outside the lock,
        # so worker threads can update their counters and call task_done()
        self.join()

    def join(self, timeout=None):
"""
Waits for all the tasks to be executed
:param timeout: Maximum time to wait (in seconds)
:return: True if the queue has been emptied, else False
"""
        if not self._queue.unfinished_tasks:
            # Nothing to wait for: no queued or running task remains
            return True
        elif timeout is None:
            # Use the original, blocking join
            self._queue.join()
            return True
        else:
            # Wait on the queue's internal condition variable; this relies
            # on implementation details of the standard Queue class
            with self._queue.all_tasks_done:
                self._queue.all_tasks_done.wait(timeout)
                return not bool(self._queue.unfinished_tasks)
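
    # Timed-join sketch (editor's illustration; ``pool`` stands for an
    # instance of this class):
    #
    #     if not pool.join(5):
    #         print("Tasks still pending after 5 seconds")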

    def __run(self):
"""
        The main loop of a worker thread
"""
with self.__lock:
self.__nb_threads += 1
while not self._done_event.is_set():
try:
# Wait for an action (blocking)
task = self._queue.get(True, self._timeout)
if task is self._done_event:
# Stop event in the queue: get out
self._queue.task_done()
with self.__lock:
self.__nb_threads -= 1
return
except queue.Empty:
# Nothing to do yet
pass
else:
with self.__lock:
self.__nb_active_threads += 1
# Extract elements
method, args, kwargs, future = task
try:
# Call the method
future.execute(method, args, kwargs)
except Exception as ex:
self._logger.exception("Error executing %s: %s",
method.__name__, ex)
finally:
# Mark the action as executed
self._queue.task_done()
                    # Thread is not active anymore; update the shared
                    # counter under the lock
                    with self.__lock:
                        self.__nb_active_threads -= 1
# Clean up thread if necessary
with self.__lock:
if self.__nb_threads > self._min_threads:
# No more work for this thread, and we're above the
# minimum number of threads: stop this one
self.__nb_threads -= 1
return
with self.__lock:
# Thread stops
self.__nb_threads -= 1
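

# ---------------------------------------------------------------------------
# Illustrative usage (editor's addition, not part of the original module).
# It assumes the pool class defined above is named ``ThreadPool`` and that
# its first constructor argument is ``max_threads``; adjust if they differ.
if __name__ == '__main__':
    def _add(a, b):
        return a + b

    _pool = ThreadPool(4, min_threads=1)
    _pool.start()
    # enqueue() returns a FutureResult for the queued call
    _future = _pool.enqueue(_add, 1, 2)
    # Wait until all queued tasks have been executed
    _pool.join()
    # Stop the workers and clear the queue
    _pool.stop()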
|
{
"content_hash": "1cc2ca3b95863438af62ad451b1ba8bd",
"timestamp": "",
"source": "github",
"line_count": 416,
"max_line_length": 80,
"avg_line_length": 31.927884615384617,
"alnum_prop": 0.5271796416202379,
"repo_name": "isandlaTech/cohorte-devtools",
"id": "bc6031bc48121ee501c8289b5d6de32a2e9140c2",
"size": "13336",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qualifier/deploy/cohorte-home/repo/pelix/threadpool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "151318"
},
{
"name": "HTML",
"bytes": "113064"
},
{
"name": "Java",
"bytes": "172793"
},
{
"name": "JavaScript",
"bytes": "2165497"
},
{
"name": "Python",
"bytes": "13926564"
},
{
"name": "Shell",
"bytes": "1490"
}
],
"symlink_target": ""
}
|
"""User roles for `lino_xl.lib.polls`.
"""
from lino.core.roles import UserRole, SiteUser, SiteAdmin


class PollsUser(UserRole):
    "Can see polls and create new responses."


class PollsStaff(PollsUser):
    "Can create new polls."


class PollsAdmin(PollsStaff, SiteAdmin):
    "Can configure polls functionality."
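

# Illustrative sketch (editor's addition): a site can combine these roles
# into its own user types. ``PollsSiteUser`` is a hypothetical name.
class PollsSiteUser(SiteUser, PollsUser):
    "A regular site user who can also respond to polls."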
|
{
"content_hash": "8727bef9239fb2d093a436e7724b5426",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 57,
"avg_line_length": 15.909090909090908,
"alnum_prop": 0.7,
"repo_name": "lino-framework/xl",
"id": "88f2df20463140a20d22f4d53718e13b33ac39a0",
"size": "466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lino_xl/lib/polls/roles.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "186625"
},
{
"name": "HTML",
"bytes": "1417287"
},
{
"name": "JavaScript",
"bytes": "1630929"
},
{
"name": "PHP",
"bytes": "40437"
},
{
"name": "Python",
"bytes": "2395471"
}
],
"symlink_target": ""
}
|
from app import db
import csv
from app.models.movie import Movie
from app.models.location import Location
from app.models.director import Director
from app.models.writer import Writer
from app.models.actor import Actor


def import_data_from_database():
"""
Build dictionaries from database
:return:
"""
# Init dictionaries
movies, actors, writers, directors, locations = {}, {}, {}, {}, {}
for movie in Movie.query.all():
        # Save movie information
movies[movie.name] = movie.id
for actor in Actor.query.all():
# Save actor information
actors[actor.name] = actor.id
for writer in Writer.query.all():
# Save writer information
writers[writer.name] = writer.id
for director in Director.query.all():
# Save director information
directors[director.name] = director.id
for location in Location.query.all():
        # Key by the location text and movie id so the CSV import (which
        # looks entries up with plain strings) can match existing rows;
        # ``name`` is assumed to be the attribute holding the text
        locations[(location.name, location.movie_id)] = location.id
return movies, actors, writers, directors, locations


def import_data_from_csv(file_path):
"""
Import data from a csv file into database
:return:
"""
try:
with open(file_path) as csv_file:
reader = csv.reader(csv_file, delimiter=',')
# Init dictionaries
movies, actors, writers, directors, locations = import_data_from_database()
            # Validate the header before importing anything
            header = next(reader)
            if header[0] != 'Title' or header[1] != 'Release Year':
                return "Bad file: unexpected CSV header"
for row in reader:
# Read CSV line
name = row[0].strip()
location = row[2]
fun_facts = row[3]
                # Movie already exists: only add a new location
if name in movies:
if '' != location:
new_location = Location(location, fun_facts, movies[name])
db.session.add(new_location)
continue
# Read more information from csv line about movie
release_year = row[1]
production = row[4]
distributor = row[5]
director = row[6]
writer = row[7]
movie_actors = [row[8], row[9], row[10]]
# Create a new Movie
movie = Movie(name, release_year, production, distributor)
# Add director
if '' != director:
if director not in directors:
director = Director(director)
db.session.add(director)
db.session.flush()
# Save director id in local dictionary
directors[director.name] = director.id
# add director_id to movie
movie.add_director(director.id)
else:
movie.add_director(directors[director])
# Add writer
if '' != writer:
if writer not in writers:
writer = Writer(writer)
db.session.add(writer)
db.session.flush()
                        # Save writer id in local dictionary
writers[writer.name] = writer.id
# add director_id to movie
movie.add_writer(writer.id)
else:
movie.add_writer(writers[writer])
# Add Actors
for actor_name in movie_actors:
if actor_name != '':
if actor_name not in actors:
actor = Actor(actor_name)
db.session.add(actor)
db.session.flush()
                            # Save actor id in local dictionary
actors[actor_name] = actor.id
                            # add actor to movie
                            movie.add_actor(actor)
                        else:
                            # NOTE: this branch passes the actor's name while
                            # the branch above passes an Actor object; the
                            # add_actor() API presumably accepts both, but
                            # the asymmetry is worth double-checking
                            movie.add_actor(actor_name)
# Add Movie in DB
db.session.add(movie)
db.session.flush()
# Store movie id in local dictionary
movies[name] = movie.id
# Create new Location, if not empty and does not exist
if '' != location:
if (location, movie.id) not in locations:
new_location = Location(location, fun_facts, movie.id)
db.session.add(new_location)
db.session.flush()
locations[(location, movie.id)] = new_location.id
# Commit imported data
db.session.commit()
except FileNotFoundError:
print("File : `" + file_path + '` not found')
|
{
"content_hash": "aa9f77f315b33751b8082872531d6240",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 87,
"avg_line_length": 33.38255033557047,
"alnum_prop": 0.4847205468435867,
"repo_name": "boltzj/movies-in-sf",
"id": "1973a8c864685ca1892c9ca03879177ba388d513",
"size": "4974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/utils/import_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28684"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('hs_core', '0007_auto_20151114_1618'),
    ]

    operations = [
migrations.AlterUniqueTogether(
name='subject',
unique_together=set([('value', 'content_type', 'object_id')]),
),
]
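

# Editor's note: a generated migration like this one is applied with
# Django's standard command, for example:
#
#     python manage.py migrate hs_core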
|
{
"content_hash": "0ebf0ca8a329f762cc6877f4f949ff5d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 74,
"avg_line_length": 22.294117647058822,
"alnum_prop": 0.5989445910290238,
"repo_name": "FescueFungiShare/hydroshare",
"id": "4ac1d9b3785ffc354b3932848d2a284461c9b996",
"size": "403",
"binary": false,
"copies": "3",
"ref": "refs/heads/FescueFungiShare-develop",
"path": "hs_core/migrations/0008_auto_20151114_2024.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "374952"
},
{
"name": "HTML",
"bytes": "1107800"
},
{
"name": "JavaScript",
"bytes": "1822132"
},
{
"name": "Python",
"bytes": "3599347"
},
{
"name": "R",
"bytes": "4475"
},
{
"name": "Shell",
"bytes": "49970"
},
{
"name": "XSLT",
"bytes": "790987"
}
],
"symlink_target": ""
}
|
def vertical(hfile):
"""Reads psipred output .ss2 file.
@param hfile psipred .ss2 file
@return secondary structure string.
"""
result = ''
    for line in hfile:
        # Skip comment lines and blank lines
        if line.startswith('#'):
            continue
        if not line.strip():
            continue
        # The third column of an .ss2 data line is the structure letter
        line_arr = line.strip().split()
        result += line_arr[2]
return result


def horizontal(hfile):
"""Reads psipred output .horiz file.
@param hfile psipred .horiz file
@return secondary structure string.
"""
result = ''
for line in hfile:
line_arr = line.strip().split(' ')
if line_arr[0] == 'Pred:' and len(line_arr) > 1:
result += line_arr[1]
return result


def horizontal_conf(hfile):
    """Reads psipred output .horiz file.
    @param hfile psipred .horiz file
    @return confidence string.
    """
result = ''
for line in hfile:
line_arr = line.strip().split(' ')
if line_arr[0] == 'Conf:' and len(line_arr) > 1:
result += line_arr[1]
return result


def horizontal_seq(hfile):
"""Reads psipred output .horiz file.
@param hfile psipred .horiz file
@return amino acid sequence.
"""
result = ''
for line in hfile:
line_arr = line.strip().split(' ')
if line_arr[0] == 'AA:':
result += line_arr[1]
return result
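

# Illustrative usage (editor's addition). ``example.horiz`` is a placeholder
# file name; any psipred .horiz output should work.
if __name__ == '__main__':
    with open('example.horiz') as handle:
        sequence = horizontal_seq(handle)
        handle.seek(0)
        structure = horizontal(handle)
        handle.seek(0)
        confidence = horizontal_conf(handle)
    print('AA:   ' + sequence)
    print('Pred: ' + structure)
    print('Conf: ' + confidence)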
|
{
"content_hash": "5c2e07d38e771e50de86a00cf9f21613",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 56,
"avg_line_length": 22.75409836065574,
"alnum_prop": 0.5489913544668588,
"repo_name": "MMichel/contact-vis",
"id": "f6942ee145df4a8d0b591aa0919ed484af9c37f9",
"size": "1411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contactvis/parsing/parse_psipred.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29358"
}
],
"symlink_target": ""
}
|