from sentry.conf.server import *
import os.path
CONF_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(CONF_ROOT, 'sentry.db'),
# Note: USER, PASSWORD, HOST and PORT are ignored by the SQLite backend above;
# they only take effect if ENGINE is switched to a PostgreSQL backend.
'USER': 'postgres',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings
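# A minimal sketch of such a cache configuration (the backend and LOCATION
# below are assumptions; point LOCATION at your own memcached instance):
#
# CACHES = {
#     'default': {
#         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#         'LOCATION': ['127.0.0.1:11211'],
#     }
# }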
###########
# General #
###########
# The administrative email for this installation.
# Note: This will be reported back to getsentry.com as the point of contact. See
# the beacon documentation for more information. This **must** be a string.
# SENTRY_ADMIN_EMAIL = 'your.name@example.com'
SENTRY_ADMIN_EMAIL = ''
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = True
#########
# Redis #
#########
SENTRY_REDIS_OPTIONS = {
'hosts': {
0: {
'host': '127.0.0.1',
'port': 6379,
}
}
}
#########
# Cache #
#########
SENTRY_CACHE = 'sentry.cache.redis.RedisCache'
#########
# Queue #
#########
CELERY_ALWAYS_EAGER = False
BROKER_URL = 'redis://localhost:6379'
###############
# Rate Limits #
###############
SENTRY_RATELIMITER = 'sentry.ratelimits.redis.RedisRateLimiter'
##################
# Update Buffers #
##################
SENTRY_BUFFER = 'sentry.buffer.redis.RedisBuffer'
##########
# Quotas #
##########
SENTRY_QUOTAS = 'sentry.quotas.redis.RedisQuota'
########
# TSDB #
########
SENTRY_TSDB = 'sentry.tsdb.redis.RedisTSDB'
################
# File storage #
################
SENTRY_FILESTORE = 'django.core.files.storage.FileSystemStorage'
SENTRY_FILESTORE_OPTIONS = {
'location': '/tmp/sentry-files',
}
##############
# Web Server #
##############
SENTRY_URL_PREFIX = 'http://0.0.0.0:9000' # No trailing slash!
SENTRY_WEB_HOST = '0.0.0.0'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
# 'workers': 3, # the number of gunicorn workers
# 'secure_scheme_headers': {'X-FORWARDED-PROTO': 'https'},
}
###############
# Mail Server #
###############
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST_USER = ''
EMAIL_PORT = 25
EMAIL_USE_TLS = False
SERVER_EMAIL = 'root@localhost'
MAILGUN_API_KEY = ''
########
# etc. #
########
# If this file ever becomes compromised, it's important to regenerate your SECRET_KEY
# Changing this value will result in all current sessions being invalidated
SECRET_KEY = 'DRAct1ThRQl9iCKDMO8+Cyn5LyeN1k4fsujEzdF8+RHE/bZcP2vyZA=='
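# One way to regenerate a key of a similar shape (a sketch using only the
# Python standard library; any sufficiently long random string works):
#
#   python -c "import base64, os; print(base64.b64encode(os.urandom(40)).decode())"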
|
{
"content_hash": "1bbdc686757a649a20b2f8f56f4dd4e8",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 85,
"avg_line_length": 22.544,
"alnum_prop": 0.6117814052519518,
"repo_name": "kemchos/docker-sentry",
"id": "33662a5195a65ad2181969c3b157dc4882152f5a",
"size": "2942",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sentry/sentry.conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6865"
},
{
"name": "Python",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""Keras timeseries dataset utilities."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.preprocessing.timeseries_dataset_from_array', v1=[])
def timeseries_dataset_from_array(
data,
targets,
sequence_length,
sequence_stride=1,
sampling_rate=1,
batch_size=128,
shuffle=False,
seed=None,
start_index=None,
end_index=None):
"""Creates a dataset of sliding windows over a timeseries provided as array.
This function takes in a sequence of data-points gathered at
equal intervals, along with time series parameters such as
length of the sequences/windows, spacing between two sequence/windows, etc.,
to produce batches of timeseries inputs and targets.
Arguments:
data: Numpy array or eager tensor
containing consecutive data points (timesteps).
Axis 0 is expected to be the time dimension.
targets: Targets corresponding to timesteps in `data`.
It should have the same length as `data`. `targets[i]` should be the target
corresponding to the window that starts at index `i`
(see example 2 below).
Pass None if you don't have target data (in this case the dataset will
only yield the input data).
sequence_length: Length of the output sequences (in number of timesteps).
sequence_stride: Period between successive output sequences.
For stride `s`, output samples would
start at indices `i`, `i + s`, `i + 2 * s`, etc.
sampling_rate: Period between successive individual timesteps
within sequences. For rate `r`, timesteps
`data[i], data[i + r], ... data[i + (sequence_length - 1) * r]`
are used to create a sample sequence.
batch_size: Number of timeseries samples in each batch
(except maybe the last one).
shuffle: Whether to shuffle output samples,
or instead draw them in chronological order.
seed: Optional int; random seed for shuffling.
start_index: Optional int; data points earlier (exclusive)
than `start_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
end_index: Optional int; data points later (exclusive) than `end_index`
will not be used in the output sequences.
This is useful to reserve part of the data for test or validation.
Returns:
A tf.data.Dataset instance. If `targets` was passed, the dataset yields
tuple `(batch_of_sequences, batch_of_targets)`. If not, the dataset yields
only `batch_of_sequences`.
Example 1:
Consider indices `[0, 1, ... 99]`.
With `sequence_length=10, sampling_rate=2, sequence_stride=3`,
`shuffle=False`, the dataset will yield batches of sequences
composed of the following indices:
```
First sequence: [0 2 4 6 8 10 12 14 16 18]
Second sequence: [3 5 7 9 11 13 15 17 19 21]
Third sequence: [6 8 10 12 14 16 18 20 22 24]
...
Last sequence: [78 80 82 84 86 88 90 92 94 96]
```
In this case the last 3 data points are discarded since no full sequence
can be generated to include them (the next sequence would have started
at index 81, and thus its last step would have gone over 99).
Example 2: temporal regression. Consider an array `data` of scalar
values, of shape `(steps,)`. To generate a dataset that uses the past 10
timesteps to predict the next timestep, you would use:
```python
input_data = data[:-10]
targets = data[10:]
dataset = tf.keras.preprocessing.timeseries_dataset_from_array(
input_data, targets, sequence_length=10)
for batch in dataset:
inputs, targets = batch
assert np.array_equal(inputs[0], data[:10]) # First sequence: steps [0-9]
assert np.array_equal(targets[0], data[10]) # Corresponding target: step 10
break
```
"""
# Validate the shape of data and targets
if targets is not None and len(targets) != len(data):
raise ValueError('Expected data and targets to have the same number of '
'time steps (axis 0) but got '
'shape(data) = %s; shape(targets) = %s.' %
(data.shape, targets.shape))
if start_index and (start_index < 0 or start_index >= len(data)):
raise ValueError('start_index must be higher than 0 and lower than the '
'length of the data. Got: start_index=%s '
'for data of length %s.' % (start_index, len(data)))
if end_index:
if start_index and end_index <= start_index:
raise ValueError('end_index must be higher than start_index. Got: '
'start_index=%s, end_index=%s.' %
(start_index, end_index))
if end_index >= len(data):
raise ValueError('end_index must be lower than the length of the data. '
'Got: end_index=%s' % (end_index,))
if end_index <= 0:
raise ValueError('end_index must be higher than 0. '
'Got: end_index=%s' % (end_index,))
# Validate strides
if sampling_rate <= 0 or sampling_rate >= len(data):
raise ValueError(
'sampling_rate must be higher than 0 and lower than '
'the length of the data. Got: '
'sampling_rate=%s for data of length %s.' % (sampling_rate, len(data)))
if sequence_stride <= 0 or sequence_stride >= len(data):
raise ValueError(
'sequence_stride must be higher than 0 and lower than '
'the length of the data. Got: sequence_stride=%s '
'for data of length %s.' % (sequence_stride, len(data)))
if start_index is None:
start_index = 0
if end_index is None:
end_index = len(data)
# Determine the lowest dtype to store start positions (to lower memory usage).
num_seqs = end_index - start_index - (sequence_length * sampling_rate) + 1
if num_seqs < 2147483647:
index_dtype = 'int32'
else:
index_dtype = 'int64'
# Generate start positions
start_positions = np.arange(0, num_seqs, sequence_stride, dtype=index_dtype)
if shuffle:
if seed is None:
seed = np.random.randint(1e6)
rng = np.random.RandomState(seed)
rng.shuffle(start_positions)
sequence_length = math_ops.cast(sequence_length, dtype=index_dtype)
sampling_rate = math_ops.cast(sampling_rate, dtype=index_dtype)
positions_ds = dataset_ops.Dataset.from_tensors(start_positions).repeat()
# For each initial window position, generate the indices of the window elements
indices = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(len(start_positions)), positions_ds)).map(
lambda i, positions: math_ops.range( # pylint: disable=g-long-lambda
positions[i],
positions[i] + sequence_length * sampling_rate,
sampling_rate),
num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = sequences_from_indices(data, indices, start_index, end_index)
if targets is not None:
indices = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(len(start_positions)), positions_ds)).map(
lambda i, positions: positions[i],
num_parallel_calls=dataset_ops.AUTOTUNE)
target_ds = sequences_from_indices(
targets, indices, start_index, end_index)
dataset = dataset_ops.Dataset.zip((dataset, target_ds))
if shuffle:
# Shuffle locally at each iteration
dataset = dataset.shuffle(buffer_size=batch_size * 8, seed=seed)
dataset = dataset.batch(batch_size)
return dataset
def sequences_from_indices(array, indices_ds, start_index, end_index):
dataset = dataset_ops.Dataset.from_tensors(array[start_index : end_index])
dataset = dataset_ops.Dataset.zip((dataset.repeat(), indices_ds)).map(
lambda steps, inds: array_ops.gather(steps, inds), # pylint: disable=unnecessary-lambda
num_parallel_calls=dataset_ops.AUTOTUNE)
return dataset
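# Usage sketch (illustrative only, not part of the module): the docstring above
# notes that `start_index`/`end_index` can reserve part of the data for
# validation. Assuming a 1-D array `data` with targets aligned as in Example 2,
# a hedged train/validation split could look like:
#
#   split = int(0.8 * len(data))
#   train_ds = timeseries_dataset_from_array(
#       data, targets, sequence_length=10, end_index=split)
#   val_ds = timeseries_dataset_from_array(
#       data, targets, sequence_length=10, start_index=split)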
|
{
"content_hash": "8a2d9ed4abd2960835e9e5a1c5f0b2c0",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 94,
"avg_line_length": 42.18652849740933,
"alnum_prop": 0.6736674035863425,
"repo_name": "aam-at/tensorflow",
"id": "64e2d06554d0d4c14b9f378537fc54a2c4df591a",
"size": "8831",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/preprocessing/timeseries.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "16049"
},
{
"name": "C",
"bytes": "784149"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "69481042"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73667"
},
{
"name": "Go",
"bytes": "1670128"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "844222"
},
{
"name": "Jupyter Notebook",
"bytes": "1665601"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101287"
},
{
"name": "Objective-C",
"bytes": "104023"
},
{
"name": "Objective-C++",
"bytes": "182460"
},
{
"name": "PHP",
"bytes": "17733"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "49451363"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4697"
},
{
"name": "Shell",
"bytes": "495434"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
}
|
from pyface.window import *
|
{
"content_hash": "42251d472000f56208fd5d868b9729ef",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 27,
"avg_line_length": 28,
"alnum_prop": 0.7857142857142857,
"repo_name": "enthought/etsproxy",
"id": "a663ebf6a917c820f1cf9328d3965e7ce96d9da1",
"size": "43",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/pyface/window.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0171_userprofile_dense_mode'),
]
operations = [
migrations.AlterField(
model_name='customprofilefield',
name='field_type',
field=models.PositiveSmallIntegerField(choices=[(1, 'Short text'), (2, 'Long text'), (4, 'Date'), (5, 'URL'), (3, 'Choice'), (6, 'User')], default=1),
),
]
|
{
"content_hash": "22b959677921b8800bfb69a34e4edf47",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 162,
"avg_line_length": 29.25,
"alnum_prop": 0.5769230769230769,
"repo_name": "brainwane/zulip",
"id": "aab971821e9bcec3bb33a70012cd7b8b5f5208bc",
"size": "519",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "zerver/migrations/0172_add_user_type_of_custom_profile_field.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "423578"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "647926"
},
{
"name": "JavaScript",
"bytes": "2886792"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "90558"
},
{
"name": "Python",
"bytes": "6000548"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "110849"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('user', '0003_auto_20150309_0042'),
]
operations = [
migrations.RemoveField(
model_name='user',
name='slug',
),
]
|
{
"content_hash": "80a65ee08a426e7722948443e4d2fc31",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 44,
"avg_line_length": 18.88235294117647,
"alnum_prop": 0.5794392523364486,
"repo_name": "CristhGunners/Photon",
"id": "50435bf98d16f99182472401190a0ad9277bdd4e",
"size": "345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "photon/apps/user/migrations/0004_remove_user_slug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16326"
},
{
"name": "HTML",
"bytes": "56235"
},
{
"name": "JavaScript",
"bytes": "3251"
},
{
"name": "Python",
"bytes": "31221"
}
],
"symlink_target": ""
}
|
"""Helper classes for tensor shape inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
@tf_export("Dimension")
class Dimension(object):
"""Represents the value of one dimension in a TensorShape."""
def __init__(self, value):
"""Creates a new Dimension with the given value."""
if value is None:
self._value = None
elif isinstance(value, dtypes.DType):
raise TypeError("Cannot convert %s to Dimension" % value)
else:
self._value = int(value)
if (not isinstance(value, compat.bytes_or_text_types) and
self._value != value):
raise ValueError("Ambiguous dimension: %s" % value)
if self._value < 0:
raise ValueError("Dimension %d must be >= 0" % self._value)
def __repr__(self):
return "Dimension(%s)" % repr(self._value)
def __str__(self):
value = self._value
return "?" if value is None else str(value)
def __eq__(self, other):
"""Returns true if `other` has the same known value as this Dimension."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value == other.value
def __ne__(self, other):
"""Returns true if `other` has a different known value from `self`."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value != other.value
def __int__(self):
return self._value
# This is needed for Windows.
# See https://github.com/tensorflow/tensorflow/pull/9780
def __long__(self):
return self._value
def __index__(self):
# Allow use in Python 3 range
return self._value
@property
def value(self):
"""The value of this dimension, or None if it is unknown."""
return self._value
def is_compatible_with(self, other):
"""Returns true if `other` is compatible with this Dimension.
Two known Dimensions are compatible if they have the same value.
An unknown Dimension is compatible with all other Dimensions.
Args:
other: Another Dimension.
Returns:
True if this Dimension and `other` are compatible.
"""
other = as_dimension(other)
return (self._value is None or other.value is None or
self._value == other.value)
def assert_is_compatible_with(self, other):
"""Raises an exception if `other` is not compatible with this Dimension.
Args:
other: Another Dimension.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
if not self.is_compatible_with(other):
raise ValueError("Dimensions %s and %s are not compatible" % (self,
other))
def merge_with(self, other):
"""Returns a Dimension that combines the information in `self` and `other`.
Dimensions are combined as follows:
```python
tf.Dimension(n) .merge_with(tf.Dimension(n)) == tf.Dimension(n)
tf.Dimension(n) .merge_with(tf.Dimension(None)) == tf.Dimension(n)
tf.Dimension(None).merge_with(tf.Dimension(n)) == tf.Dimension(n)
tf.Dimension(None).merge_with(tf.Dimension(None)) == tf.Dimension(None)
tf.Dimension(n) .merge_with(tf.Dimension(m)) # raises ValueError for n != m
```
Args:
other: Another Dimension.
Returns:
A Dimension containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
other = as_dimension(other)
self.assert_is_compatible_with(other)
if self._value is None:
return Dimension(other.value)
else:
return Dimension(self._value)
def __add__(self, other):
"""Returns the sum of `self` and `other`.
Dimensions are summed as follows:
```python
tf.Dimension(m) + tf.Dimension(n) == tf.Dimension(m + n)
tf.Dimension(m) + tf.Dimension(None) == tf.Dimension(None)
tf.Dimension(None) + tf.Dimension(n) == tf.Dimension(None)
tf.Dimension(None) + tf.Dimension(None) == tf.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value + other.value)
def __radd__(self, other):
"""Returns the sum of `other` and `self`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
return self + other
def __sub__(self, other):
"""Returns the subtraction of `other` from `self`.
Dimensions are subtracted as follows:
```python
tf.Dimension(m) - tf.Dimension(n) == tf.Dimension(m - n)
tf.Dimension(m) - tf.Dimension(None) == tf.Dimension(None)
tf.Dimension(None) - tf.Dimension(n) == tf.Dimension(None)
tf.Dimension(None) - tf.Dimension(None) == tf.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the subtraction of `other` from `self`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value - other.value)
def __rsub__(self, other):
"""Returns the subtraction of `self` from `other`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the subtraction of `self` from `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(other.value - self._value)
def __mul__(self, other):
"""Returns the product of `self` and `other`.
Dimensions are multiplied as follows:
```python
tf.Dimension(m) * tf.Dimension(n) == tf.Dimension(m * n)
tf.Dimension(m) * tf.Dimension(None) == tf.Dimension(None)
tf.Dimension(None) * tf.Dimension(n) == tf.Dimension(None)
tf.Dimension(None) * tf.Dimension(None) == tf.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value * other.value)
def __rmul__(self, other):
"""Returns the product of `self` and `other`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
return self * other
def __floordiv__(self, other):
"""Returns the quotient of `self` and `other` rounded down.
Dimensions are divided as follows:
```python
tf.Dimension(m) // tf.Dimension(n) == tf.Dimension(m // n)
tf.Dimension(m) // tf.Dimension(None) == tf.Dimension(None)
tf.Dimension(None) // tf.Dimension(n) == tf.Dimension(None)
tf.Dimension(None) // tf.Dimension(None) == tf.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value // other.value)
def __rfloordiv__(self, other):
"""Returns the quotient of `other` and `self` rounded down.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(other.value // self._value)
def __div__(self, other):
"""DEPRECATED: Use `__floordiv__` via `x // y` instead.
This function exists only for backwards compatibility purposes; new code
should use `__floordiv__` via the syntax `x // y`. Using `x // y`
communicates clearly that the result rounds down, and is forward compatible
to Python 3.
Args:
other: Another `Dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
return self // other
def __mod__(self, other):
"""Returns `self` modulo `other`.
Dimension moduli are computed as follows:
```python
tf.Dimension(m) % tf.Dimension(n) == tf.Dimension(m % n)
tf.Dimension(m) % tf.Dimension(None) == tf.Dimension(None)
tf.Dimension(None) % tf.Dimension(n) == tf.Dimension(None)
tf.Dimension(None) % tf.Dimension(None) == tf.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is `self` modulo `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value % other.value)
def __rmod__(self, other):
"""Returns `other` modulo `self`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is `other` modulo `self`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
return other % self
def __lt__(self, other):
"""Returns True if `self` is known to be less than `other`.
Dimensions are compared as follows:
```python
(tf.Dimension(m) < tf.Dimension(n)) == (m < n)
(tf.Dimension(m) < tf.Dimension(None)) == None
(tf.Dimension(None) < tf.Dimension(n)) == None
(tf.Dimension(None) < tf.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value < other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value < other.value
def __le__(self, other):
"""Returns True if `self` is known to be less than or equal to `other`.
Dimensions are compared as follows:
```python
(tf.Dimension(m) <= tf.Dimension(n)) == (m <= n)
(tf.Dimension(m) <= tf.Dimension(None)) == None
(tf.Dimension(None) <= tf.Dimension(n)) == None
(tf.Dimension(None) <= tf.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value <= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value <= other.value
def __gt__(self, other):
"""Returns True if `self` is known to be greater than `other`.
Dimensions are compared as follows:
```python
(tf.Dimension(m) > tf.Dimension(n)) == (m > n)
(tf.Dimension(m) > tf.Dimension(None)) == None
(tf.Dimension(None) > tf.Dimension(n)) == None
(tf.Dimension(None) > tf.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value > other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value > other.value
def __ge__(self, other):
"""Returns True if `self` is known to be greater than or equal to `other`.
Dimensions are compared as follows:
```python
(tf.Dimension(m) >= tf.Dimension(n)) == (m >= n)
(tf.Dimension(m) >= tf.Dimension(None)) == None
(tf.Dimension(None) >= tf.Dimension(n)) == None
(tf.Dimension(None) >= tf.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value >= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value >= other.value
def __reduce__(self):
return Dimension, (self._value,)
def as_dimension(value):
"""Converts the given value to a Dimension.
A Dimension input will be returned unmodified.
An input of `None` will be converted to an unknown Dimension.
An integer input will be converted to a Dimension with that value.
Args:
value: The value to be converted.
Returns:
A Dimension corresponding to the given value.
"""
if isinstance(value, Dimension):
return value
else:
return Dimension(value)
@tf_export("TensorShape")
class TensorShape(object):
"""Represents the shape of a `Tensor`.
A `TensorShape` represents a possibly-partial shape specification for a
`Tensor`. It may be one of the following:
* *Fully-known shape:* has a known number of dimensions and a known size
for each dimension. e.g. `TensorShape([16, 256])`
* *Partially-known shape:* has a known number of dimensions, and an unknown
size for one or more dimension. e.g. `TensorShape([None, 256])`
* *Unknown shape:* has an unknown number of dimensions, and an unknown
size in all dimensions. e.g. `TensorShape(None)`
If a tensor is produced by an operation of type `"Foo"`, its shape
may be inferred if there is a registered shape function for
`"Foo"`. See [Shape
functions](https://tensorflow.org/extend/adding_an_op#shape_functions_in_c)
for details of shape functions and how to register them. Alternatively,
the shape may be set explicitly using `tf.Tensor.set_shape`.
"""
def __init__(self, dims):
"""Creates a new TensorShape with the given dimensions.
Args:
dims: A list of Dimensions, or None if the shape is unspecified.
DEPRECATED: A single integer is treated as a singleton list.
Raises:
TypeError: If dims cannot be converted to a list of dimensions.
"""
# TODO(irving): Eliminate the single integer special case.
if dims is None:
self._dims = None
elif isinstance(dims, compat.bytes_or_text_types):
raise TypeError("A string has ambiguous TensorShape, please wrap in a "
"list or convert to an int: %s" % dims)
elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
if dims.unknown_rank:
self._dims = None
else:
self._dims = [
# Protos store variable-size dimensions as -1
as_dimension(dim.size if dim.size != -1 else None)
for dim in dims.dim
]
elif isinstance(dims, TensorShape):
self._dims = dims.dims
else:
try:
dims_iter = iter(dims)
except TypeError:
# Treat as a singleton dimension
self._dims = [as_dimension(dims)]
else:
# Got a list of dimensions
self._dims = [as_dimension(d) for d in dims_iter]
self._ndims = None
def __repr__(self):
return "TensorShape(%r)" % self._dims
def __str__(self):
if self.ndims is None:
return "<unknown>"
elif self.ndims == 1:
return "(%s,)" % self._dims[0]
else:
return "(%s)" % ", ".join(str(d) for d in self._dims)
@property
def dims(self):
"""Returns a list of Dimensions, or None if the shape is unspecified."""
return self._dims
@dims.setter
def dims(self, dims):
self._dims = dims
self._ndims = None
@property
def ndims(self):
"""Returns the rank of this shape, or None if it is unspecified."""
if self._dims is None:
return None
else:
if self._ndims is None:
self._ndims = len(self._dims)
return self._ndims
def __len__(self):
"""Returns the rank of this shape, or raises ValueError if unspecified."""
if self._dims is None:
raise ValueError("Cannot take the length of Shape with unknown rank.")
return self.ndims
def __bool__(self):
"""Returns True if this shape contains non-zero information."""
return self._dims is not None
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __iter__(self):
"""Returns `self.dims` if the rank is known, otherwise raises ValueError."""
if self._dims is None:
raise ValueError("Cannot iterate over a shape with unknown rank.")
else:
return iter(self._dims)
def __getitem__(self, key):
"""Returns the value of a dimension or a shape, depending on the key.
Args:
key: If `key` is an integer, returns the dimension at that index;
otherwise if `key` is a slice, returns a TensorShape whose
dimensions are those selected by the slice from `self`.
Returns:
A dimension if `key` is an integer, or a `TensorShape` if `key` is a
slice.
Raises:
ValueError: If `key` is a slice and `self` is completely unknown and
the step is set.
"""
if self._dims is not None:
if isinstance(key, slice):
return TensorShape(self._dims[key])
else:
return self._dims[key]
else:
if isinstance(key, slice):
start = key.start if key.start is not None else 0
stop = key.stop
if key.step is not None:
# TODO(mrry): Handle these maybe.
raise ValueError("Steps are not yet handled")
if stop is None:
# NOTE(mrry): This implies that TensorShape(None) is compatible with
# TensorShape(None)[1:], which is obviously not true. It would be
# possible to track the number of dimensions symbolically,
# and perhaps we should do that.
return unknown_shape()
elif start < 0 or stop < 0:
# TODO(mrry): Handle this better, as it will be useful for handling
# suffixes of otherwise unknown shapes.
return unknown_shape()
else:
return unknown_shape(ndims=stop - start)
else:
return Dimension(None)
def num_elements(self):
"""Returns the total number of elements, or none for incomplete shapes."""
if self.is_fully_defined():
size = 1
for dim in self._dims:
size *= dim.value
return size
else:
return None
def merge_with(self, other):
"""Returns a `TensorShape` combining the information in `self` and `other`.
The dimensions in `self` and `other` are merged elementwise,
according to the rules defined for `Dimension.merge_with()`.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible.
"""
other = as_shape(other)
if self._dims is None:
return other
else:
try:
self.assert_same_rank(other)
new_dims = []
for i, dim in enumerate(self._dims):
new_dims.append(dim.merge_with(other[i]))
return TensorShape(new_dims)
except ValueError:
raise ValueError("Shapes %s and %s are not compatible" % (self, other))
def concatenate(self, other):
"""Returns the concatenation of the dimension in `self` and `other`.
*N.B.* If either `self` or `other` is completely unknown,
concatenation will discard information about the other shape. In
future, we might support concatenation that preserves this
information for use with slicing.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` whose dimensions are the concatenation of the
dimensions in `self` and `other`.
"""
# TODO(mrry): Handle the case where we concatenate a known shape with a
# completely unknown shape, so that we can use the partial information.
other = as_shape(other)
if self._dims is None or other.dims is None:
return unknown_shape()
else:
return TensorShape(self._dims + other.dims)
def assert_same_rank(self, other):
"""Raises an exception if `self` and `other` do not have compatible ranks.
Args:
other: Another `TensorShape`.
Raises:
ValueError: If `self` and `other` do not represent shapes with the
same rank.
"""
other = as_shape(other)
if self.ndims is not None and other.ndims is not None:
if self.ndims != other.ndims:
raise ValueError("Shapes %s and %s must have the same rank" % (self,
other))
def assert_has_rank(self, rank):
"""Raises an exception if `self` is not compatible with the given `rank`.
Args:
rank: An integer.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
if self.ndims not in (None, rank):
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank(self, rank):
"""Returns a shape based on `self` with the given rank.
This method promotes a completely unknown shape to one with a
known rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with the given rank.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
try:
return self.merge_with(unknown_shape(ndims=rank))
except ValueError:
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank_at_least(self, rank):
"""Returns a shape based on `self` with at least the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at least the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at least the given
`rank`.
"""
if self.ndims is not None and self.ndims < rank:
raise ValueError("Shape %s must have rank at least %d" % (self, rank))
else:
return self
def with_rank_at_most(self, rank):
"""Returns a shape based on `self` with at most the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at most the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at most the given
`rank`.
"""
if self.ndims is not None and self.ndims > rank:
raise ValueError("Shape %s must have rank at most %d" % (self, rank))
else:
return self
def is_compatible_with(self, other):
"""Returns True iff `self` is compatible with `other`.
Two possibly-partially-defined shapes are compatible if there
exists a fully-defined shape that both shapes can represent. Thus,
compatibility allows the shape inference code to reason about
partially-defined shapes. For example:
* TensorShape(None) is compatible with all shapes.
* TensorShape([None, None]) is compatible with all two-dimensional
shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is
not compatible with, for example, TensorShape([None]) or
TensorShape([None, None, None]).
* TensorShape([32, None]) is compatible with all two-dimensional shapes
with size 32 in the 0th dimension, and also TensorShape([None, None])
and TensorShape(None). It is not compatible with, for example,
TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).
* TensorShape([32, 784]) is compatible with itself, and also
TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None,
None]) and TensorShape(None). It is not compatible with, for example,
TensorShape([32, 1, 784]) or TensorShape([None]).
The compatibility relation is reflexive and symmetric, but not
transitive. For example, TensorShape([32, 784]) is compatible with
TensorShape(None), and TensorShape(None) is compatible with
TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with
TensorShape([4, 4]).
Args:
other: Another TensorShape.
Returns:
True iff `self` is compatible with `other`.
"""
other = as_shape(other)
if self._dims is not None and other.dims is not None:
if self.ndims != other.ndims:
return False
for x_dim, y_dim in zip(self._dims, other.dims):
if not x_dim.is_compatible_with(y_dim):
return False
return True
def assert_is_compatible_with(self, other):
"""Raises exception if `self` and `other` do not represent the same shape.
This method can be used to assert that there exists a shape that both
`self` and `other` represent.
Args:
other: Another TensorShape.
Raises:
ValueError: If `self` and `other` do not represent the same shape.
"""
if not self.is_compatible_with(other):
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
def most_specific_compatible_shape(self, other):
"""Returns the most specific TensorShape compatible with `self` and `other`.
* TensorShape([None, 1]) is the most specific TensorShape compatible with
both TensorShape([2, 1]) and TensorShape([5, 1]). Note that
TensorShape(None) is also compatible with the above-mentioned TensorShapes.
* TensorShape([1, 2, 3]) is the most specific TensorShape compatible with
both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are other,
less specific TensorShapes compatible with the above-mentioned TensorShapes,
e.g. TensorShape([1, 2, None]), TensorShape(None).
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` which is the most specific compatible shape of `self`
and `other`.
"""
other = as_shape(other)
if self._dims is None or other.dims is None or self.ndims != other.ndims:
return unknown_shape()
dims = [(Dimension(None))] * self.ndims
for i, (d1, d2) in enumerate(zip(self._dims, other.dims)):
if d1 is not None and d2 is not None and d1 == d2:
dims[i] = d1
return TensorShape(dims)
def is_fully_defined(self):
"""Returns True iff `self` is fully defined in every dimension."""
return (self._dims is not None and all(dim.value is not None
for dim in self._dims))
def assert_is_fully_defined(self):
"""Raises an exception if `self` is not fully defined in every dimension.
Raises:
ValueError: If `self` does not have a known value for every dimension.
"""
if not self.is_fully_defined():
raise ValueError("Shape %s is not fully defined" % self)
def as_list(self):
"""Returns a list of integers or `None` for each dimension.
Returns:
A list of integers or `None` for each dimension.
Raises:
ValueError: If `self` is an unknown shape with an unknown rank.
"""
if self._dims is None:
raise ValueError("as_list() is not defined on an unknown TensorShape.")
return [dim.value for dim in self._dims]
def as_proto(self):
"""Returns this shape as a `TensorShapeProto`."""
if self._dims is None:
return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
else:
return tensor_shape_pb2.TensorShapeProto(dim=[
tensor_shape_pb2.TensorShapeProto.Dim(size=-1
if d.value is None else d.value)
for d in self._dims
])
def __eq__(self, other):
"""Returns True if `self` is equivalent to `other`."""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
return self._dims == other.dims
def __ne__(self, other):
"""Returns True if `self` is known to be different from `other`."""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
if self.ndims is None or other.ndims is None:
raise ValueError("The inequality of unknown TensorShapes is undefined.")
if self.ndims != other.ndims:
return True
return self._dims != other.dims
def __reduce__(self):
return TensorShape, (self._dims,)
def as_shape(shape):
"""Converts the given object to a TensorShape."""
if isinstance(shape, TensorShape):
return shape
else:
return TensorShape(shape)
def unknown_shape(ndims=None):
"""Returns an unknown TensorShape, optionally with a known rank.
Args:
ndims: (Optional) If specified, the number of dimensions in the shape.
Returns:
An unknown TensorShape.
"""
if ndims is None:
return TensorShape(None)
else:
return TensorShape([Dimension(None)] * ndims)
_SCALAR_SHAPE = TensorShape([])
def scalar():
"""Returns a shape representing a scalar."""
return _SCALAR_SHAPE
def vector(length):
"""Returns a shape representing a vector.
Args:
length: The length of the vector, which may be None if unknown.
Returns:
A TensorShape representing a vector of the given length.
"""
return TensorShape([length])
def matrix(rows, cols):
"""Returns a shape representing a matrix.
Args:
rows: The number of rows in the matrix, which may be None if unknown.
cols: The number of columns in the matrix, which may be None if unknown.
Returns:
A TensorShape representing a matrix of the given size.
"""
return TensorShape([rows, cols])
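# Usage sketch (illustrative only, not part of the module): combining partial
# shape information with the classes defined above.
#
#   a = TensorShape([None, 256])
#   b = TensorShape([32, None])
#   assert a.is_compatible_with(b)
#   merged = a.merge_with(b)            # TensorShape([32, 256])
#   assert merged.is_fully_defined()
#   assert merged.as_list() == [32, 256]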
|
{
"content_hash": "c48a6d3fb1b7d76c8f55c12ae7806137",
"timestamp": "",
"source": "github",
"line_count": 981,
"max_line_length": 82,
"avg_line_length": 30.902140672782874,
"alnum_prop": 0.6344054098631041,
"repo_name": "xodus7/tensorflow",
"id": "3c2a736fb98915af3048a593ad1908b8afb879b3",
"size": "31004",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/tensor_shape.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "340946"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "48861698"
},
{
"name": "CMake",
"bytes": "195699"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1240309"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "834061"
},
{
"name": "Jupyter Notebook",
"bytes": "2604756"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52618"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40952138"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "459258"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import unittest
from fixofx.ofx import Response
from fixofx.test.ofx_test_utils import get_checking_stmt
class DocumentTests(unittest.TestCase):
def setUp(self):
self.checking = get_checking_stmt()
def test_statement_as_xml(self):
response = Response(self.checking)
self.assertEqual('<?xml version="1.0"', response.as_xml()[:19])
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "68a8caf360b80e17377d3b1eb0760dfc",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 71,
"avg_line_length": 24.88235294117647,
"alnum_prop": 0.6572104018912529,
"repo_name": "henriquebastos/fixofx",
"id": "5eef987704495595b90d459a529022509e5d3276",
"size": "1007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixofx/test/test_ofx_document.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "189952"
}
],
"symlink_target": ""
}
|
"""Utilties for V2 control flow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import ops
from tensorflow.python.framework.func_graph import FuncGraph
from tensorflow.python.ops import control_flow_util
class CondBranchFuncGraph(FuncGraph):
"""FuncGraph for branches of tf.cond().
This is used to distinguish cond branches from other functions.
"""
pass
class WhileCondFuncGraph(FuncGraph):
"""FuncGraph for the condition of tf.while_loop().
This is used to distinguish while conditions from other functions.
"""
pass
class WhileBodyFuncGraph(FuncGraph):
"""FuncGraph for the body of tf.while_loop().
This is used to distinguish while bodies from other functions.
"""
pass
def in_defun():
"""Returns if the current graph is, or is nested in, a defun."""
if context.executing_eagerly(): return False
graph = ops.get_default_graph()
while (isinstance(graph, CondBranchFuncGraph) or
isinstance(graph, WhileBodyFuncGraph)):
graph = graph.outer_graph
return isinstance(graph, FuncGraph)
def create_new_tf_function(func_graph):
"""Converts func_graph to a TF_Function and adds it to the current graph.
Args:
func_graph: FuncGraph
Returns:
The name of the new TF_Function.
"""
func = function._EagerDefinedFunction( # pylint: disable=protected-access
func_graph.name, func_graph, func_graph.inputs, func_graph.outputs, {})
func.add_to_graph(func_graph.outer_graph)
return func_graph.name
def unique_fn_name(scope, name):
"""Returns a unique name to use for a control flow function.
Args:
scope: A name scope string.
name: An identifier for this function (e.g. "true", "body").
Returns:
A string, the name to use for the function.
"""
return ("%s%s_%s" % (scope, name, ops.uid())).replace("/", "_")
def unique_grad_fn_name(forward_name):
return "%s_grad_%s" % (forward_name, ops.uid())
def maybe_set_lowering_attr(op):
"""Sets the flag to enable lowering on `op` if necessary.
Lowering allows cond_v2 and while_v2 to avoid some of the limitations of
Functions, allowing users to specify devices & colocation inside of cond_v2
and while_v2 input functions, and enabling non-strict evaluation & partial
pruning. This brings v2 control flow closer to feature parity with v1 control
flow.
However, we do not lower in the following cases:
- When the `If` or `While` ops are in the XLA context. Because it is easier
for XLA to apply its own optimizations when dealing with un-lowered
control flow operators than with low-level control flow primitives.
- When the eager execution context specifies the executor of functions to
be the single threaded executor (see context.function_executor_type()).
Because the single threaded executor does not support v1 control flow ops.
Args:
op: An `If` or `While` Operation.
"""
if (not control_flow_util.GraphOrParentsInXlaContext(op.graph) and
context.context().function_call_options.executor_type !=
"SINGLE_THREADED_EXECUTOR"):
# pylint: disable=protected-access
op._set_attr("_lower_using_switch_merge", attr_value_pb2.AttrValue(b=True))
# pylint: enable=protected-access
def maybe_propagate_compile_time_consts_in_xla(op):
"""Tells XLA whether to propagate compile-time consts in the loop body.
This is needed to make compile time constants available to ops, for example
`max_num_elements` in `EmptyTensorList`, inside the loop body. Ideally this
would always be turned on, but that doesn't work with legacy functionalized
while_loops.
Args:
op: A `While` Operation.
"""
if control_flow_util.GraphOrParentsInXlaContext(op.graph):
# pylint: disable=protected-access
op._set_attr("_xla_propagate_compile_time_consts",
attr_value_pb2.AttrValue(b=True))
# pylint: enable=protected-access
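# Usage sketch (illustrative only; the uid suffixes below are made up): the
# naming helpers above produce graph-unique, slash-free function names.
#
#   unique_fn_name("cond/", "true")       # e.g. "cond_true_17"
#   unique_grad_fn_name("cond_true_17")   # e.g. "cond_true_17_grad_23"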
|
{
"content_hash": "df4d1bd4e297cf67cb582825a0efd2f8",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 80,
"avg_line_length": 32.936,
"alnum_prop": 0.7243138207432597,
"repo_name": "jbedorf/tensorflow",
"id": "cd37419906b1dc46f851cad35b83e93500261cf7",
"size": "4807",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/control_flow_util_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "647467"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59799751"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1508512"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908330"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94633"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15108"
},
{
"name": "Pascal",
"bytes": "770"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46379626"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "480235"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
}
|
"""The tests for the device tracker component."""
from datetime import datetime, timedelta
import json
import logging
import os
from unittest.mock import Mock, call
from asynctest import patch
import pytest
from homeassistant.components import zone
import homeassistant.components.device_tracker as device_tracker
from homeassistant.components.device_tracker import const, legacy
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ENTITY_PICTURE,
ATTR_FRIENDLY_NAME,
ATTR_GPS_ACCURACY,
ATTR_HIDDEN,
ATTR_ICON,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_PLATFORM,
STATE_HOME,
STATE_NOT_HOME,
)
from homeassistant.core import State, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import discovery
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
assert_setup_component,
async_fire_time_changed,
mock_registry,
mock_restore_cache,
patch_yaml_files,
)
from tests.components.device_tracker import common
TEST_PLATFORM = {device_tracker.DOMAIN: {CONF_PLATFORM: "test"}}
_LOGGER = logging.getLogger(__name__)
@pytest.fixture(name="yaml_devices")
def mock_yaml_devices(hass):
"""Get a path for storing yaml devices."""
yaml_devices = hass.config.path(legacy.YAML_DEVICES)
if os.path.isfile(yaml_devices):
os.remove(yaml_devices)
yield yaml_devices
if os.path.isfile(yaml_devices):
os.remove(yaml_devices)
async def test_is_on(hass):
"""Test is_on method."""
entity_id = const.ENTITY_ID_FORMAT.format("test")
hass.states.async_set(entity_id, STATE_HOME)
assert device_tracker.is_on(hass, entity_id)
hass.states.async_set(entity_id, STATE_NOT_HOME)
assert not device_tracker.is_on(hass, entity_id)
async def test_reading_broken_yaml_config(hass):
"""Test when known devices contains invalid data."""
files = {
"empty.yaml": "",
"nodict.yaml": "100",
"badkey.yaml": "@:\n name: Device",
"noname.yaml": "my_device:\n",
"allok.yaml": "My Device:\n name: Device",
"oneok.yaml": ("My Device!:\n name: Device\n" "bad_device:\n nme: Device"),
}
args = {"hass": hass, "consider_home": timedelta(seconds=60)}
with patch_yaml_files(files):
assert await legacy.async_load_config("empty.yaml", **args) == []
assert await legacy.async_load_config("nodict.yaml", **args) == []
assert await legacy.async_load_config("noname.yaml", **args) == []
assert await legacy.async_load_config("badkey.yaml", **args) == []
res = await legacy.async_load_config("allok.yaml", **args)
assert len(res) == 1
assert res[0].name == "Device"
assert res[0].dev_id == "my_device"
res = await legacy.async_load_config("oneok.yaml", **args)
assert len(res) == 1
assert res[0].name == "Device"
assert res[0].dev_id == "my_device"
async def test_reading_yaml_config(hass, yaml_devices):
"""Test the rendering of the YAML configuration."""
dev_id = "test"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
"AB:CD:EF:GH:IJ",
"Test name",
picture="http://test.picture",
hide_if_away=True,
icon="mdi:kettle",
)
await hass.async_add_executor_job(
legacy.update_config, yaml_devices, dev_id, device
)
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
config = (await legacy.async_load_config(yaml_devices, hass, device.consider_home))[
0
]
assert device.dev_id == config.dev_id
assert device.track == config.track
assert device.mac == config.mac
assert device.config_picture == config.config_picture
assert device.away_hide == config.away_hide
assert device.consider_home == config.consider_home
assert device.icon == config.icon
@patch("homeassistant.components.device_tracker.const.LOGGER.warning")
async def test_duplicate_mac_dev_id(mock_warning, hass):
"""Test adding duplicate MACs or device IDs to DeviceTracker."""
devices = [
legacy.Device(
hass, True, True, "my_device", "AB:01", "My device", None, None, False
),
legacy.Device(
hass, True, True, "your_device", "AB:01", "Your device", None, None, False
),
]
legacy.DeviceTracker(hass, False, True, {}, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert (
mock_warning.call_count == 1
), "The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert "Duplicate device MAC" in args[0], "Duplicate MAC warning expected"
mock_warning.reset_mock()
devices = [
legacy.Device(
hass, True, True, "my_device", "AB:01", "My device", None, None, False
),
legacy.Device(
hass, True, True, "my_device", None, "Your device", None, None, False
),
]
legacy.DeviceTracker(hass, False, True, {}, devices)
_LOGGER.debug(mock_warning.call_args_list)
assert (
mock_warning.call_count == 1
), "The only warning call should be duplicates (check DEBUG)"
args, _ = mock_warning.call_args
assert "Duplicate device IDs" in args[0], "Duplicate device IDs warning expected"
async def test_setup_without_yaml_file(hass):
"""Test with no YAML file."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
async def test_gravatar(hass):
"""Test the Gravatar generation."""
dev_id = "test"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
"AB:CD:EF:GH:IJ",
"Test name",
gravatar="test@example.com",
)
gravatar_url = (
"https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar"
)
assert device.config_picture == gravatar_url
async def test_gravatar_and_picture(hass):
"""Test that Gravatar overrides picture."""
dev_id = "test"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
"AB:CD:EF:GH:IJ",
"Test name",
picture="http://test.picture",
gravatar="test@example.com",
)
gravatar_url = (
"https://www.gravatar.com/avatar/"
"55502f40dc8b7c769880b10874abc9d0.jpg?s=80&d=wavatar"
)
assert device.config_picture == gravatar_url
@patch("homeassistant.components.device_tracker.legacy.DeviceTracker.see")
@patch("homeassistant.components.demo.device_tracker.setup_scanner", autospec=True)
async def test_discover_platform(mock_demo_setup_scanner, mock_see, hass):
"""Test discovery of device_tracker demo platform."""
await discovery.async_load_platform(
hass, device_tracker.DOMAIN, "demo", {"test_key": "test_val"}, {"bla": {}}
)
await hass.async_block_till_done()
assert device_tracker.DOMAIN in hass.config.components
assert mock_demo_setup_scanner.called
assert mock_demo_setup_scanner.call_args[0] == (
hass,
{},
mock_see,
{"test_key": "test_val"},
)
async def test_update_stale(hass, mock_device_tracker_conf):
"""Test stalled update."""
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
scanner.come_home("DEV1")
register_time = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
scan_time = datetime(2015, 9, 15, 23, 1, tzinfo=dt_util.UTC)
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=register_time,
):
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(
hass,
device_tracker.DOMAIN,
{
device_tracker.DOMAIN: {
CONF_PLATFORM: "test",
device_tracker.CONF_CONSIDER_HOME: 59,
}
},
)
await hass.async_block_till_done()
assert STATE_HOME == hass.states.get("device_tracker.dev1").state
scanner.leave_home("DEV1")
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=scan_time,
):
async_fire_time_changed(hass, scan_time)
await hass.async_block_till_done()
assert STATE_NOT_HOME == hass.states.get("device_tracker.dev1").state
async def test_entity_attributes(hass, mock_device_tracker_conf):
"""Test the entity attributes."""
devices = mock_device_tracker_conf
dev_id = "test_entity"
entity_id = const.ENTITY_ID_FORMAT.format(dev_id)
friendly_name = "Paulus"
picture = "http://placehold.it/200x200"
icon = "mdi:kettle"
device = legacy.Device(
hass,
timedelta(seconds=180),
True,
dev_id,
None,
friendly_name,
picture,
hide_if_away=True,
icon=icon,
)
devices.append(device)
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
attrs = hass.states.get(entity_id).attributes
assert friendly_name == attrs.get(ATTR_FRIENDLY_NAME)
assert icon == attrs.get(ATTR_ICON)
assert picture == attrs.get(ATTR_ENTITY_PICTURE)
async def test_device_hidden(hass, mock_device_tracker_conf):
"""Test hidden devices."""
devices = mock_device_tracker_conf
dev_id = "test_entity"
entity_id = const.ENTITY_ID_FORMAT.format(dev_id)
device = legacy.Device(
hass, timedelta(seconds=180), True, dev_id, None, hide_if_away=True
)
devices.append(device)
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
assert hass.states.get(entity_id).attributes.get(ATTR_HIDDEN)
async def test_group_all_devices(hass, mock_device_tracker_conf):
"""Test grouping of devices."""
devices = mock_device_tracker_conf
dev_id = "test_entity"
entity_id = const.ENTITY_ID_FORMAT.format(dev_id)
device = legacy.Device(
hass, timedelta(seconds=180), True, dev_id, None, hide_if_away=True
)
devices.append(device)
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
await hass.async_block_till_done()
state = hass.states.get(device_tracker.ENTITY_ID_ALL_DEVICES)
assert state is not None
assert STATE_NOT_HOME == state.state
assert (entity_id,) == state.attributes.get(ATTR_ENTITY_ID)
@patch("homeassistant.components.device_tracker.legacy." "DeviceTracker.async_see")
async def test_see_service(mock_see, hass):
"""Test the see service with a unicode dev_id and NO MAC."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
params = {
"dev_id": "some_device",
"host_name": "example.com",
"location_name": "Work",
"gps": [0.3, 0.8],
"attributes": {"test": "test"},
}
common.async_see(hass, **params)
await hass.async_block_till_done()
    assert mock_see.call_count == 1
assert mock_see.call_args == call(**params)
mock_see.reset_mock()
params["dev_id"] += chr(233) # e' acute accent from icloud
common.async_see(hass, **params)
await hass.async_block_till_done()
    assert mock_see.call_count == 1
assert mock_see.call_args == call(**params)
async def test_see_service_guard_config_entry(hass, mock_device_tracker_conf):
"""Test the guard if the device is registered in the entity registry."""
mock_entry = Mock()
dev_id = "test"
entity_id = const.ENTITY_ID_FORMAT.format(dev_id)
mock_registry(hass, {entity_id: mock_entry})
devices = mock_device_tracker_conf
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
params = {"dev_id": dev_id, "gps": [0.3, 0.8]}
common.async_see(hass, **params)
await hass.async_block_till_done()
assert not devices
async def test_new_device_event_fired(hass, mock_device_tracker_conf):
"""Test that the device tracker will fire an event."""
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
test_events = []
@callback
def listener(event):
"""Record that our event got called."""
test_events.append(event)
hass.bus.async_listen("device_tracker_new_device", listener)
common.async_see(hass, "mac_1", host_name="hello")
common.async_see(hass, "mac_1", host_name="hello")
await hass.async_block_till_done()
assert len(test_events) == 1
# Assert we can serialize the event
json.dumps(test_events[0].as_dict(), cls=JSONEncoder)
assert test_events[0].data == {
"entity_id": "device_tracker.hello",
"host_name": "hello",
"mac": "MAC_1",
}
async def test_duplicate_yaml_keys(hass, mock_device_tracker_conf):
"""Test that the device tracker will not generate invalid YAML."""
devices = mock_device_tracker_conf
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
common.async_see(hass, "mac_1", host_name="hello")
common.async_see(hass, "mac_2", host_name="hello")
await hass.async_block_till_done()
assert len(devices) == 2
assert devices[0].dev_id != devices[1].dev_id
async def test_invalid_dev_id(hass, mock_device_tracker_conf):
"""Test that the device tracker will not allow invalid dev ids."""
devices = mock_device_tracker_conf
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
common.async_see(hass, dev_id="hello-world")
await hass.async_block_till_done()
assert not devices
async def test_see_state(hass, yaml_devices):
"""Test device tracker see records state correctly."""
assert await async_setup_component(hass, device_tracker.DOMAIN, TEST_PLATFORM)
params = {
"mac": "AA:BB:CC:DD:EE:FF",
"dev_id": "some_device",
"host_name": "example.com",
"location_name": "Work",
"gps": [0.3, 0.8],
"gps_accuracy": 1,
"battery": 100,
"attributes": {"test": "test", "number": 1},
}
common.async_see(hass, **params)
await hass.async_block_till_done()
config = await legacy.async_load_config(yaml_devices, hass, timedelta(seconds=0))
assert len(config) == 1
state = hass.states.get("device_tracker.example_com")
attrs = state.attributes
assert state.state == "Work"
assert state.object_id == "example_com"
assert state.name == "example.com"
assert attrs["friendly_name"] == "example.com"
assert attrs["battery"] == 100
assert attrs["latitude"] == 0.3
assert attrs["longitude"] == 0.8
assert attrs["test"] == "test"
assert attrs["gps_accuracy"] == 1
assert attrs["source_type"] == "gps"
assert attrs["number"] == 1
async def test_see_passive_zone_state(hass, mock_device_tracker_conf):
"""Test that the device tracker sets gps for passive trackers."""
register_time = datetime(2015, 9, 15, 23, tzinfo=dt_util.UTC)
scan_time = datetime(2015, 9, 15, 23, 1, tzinfo=dt_util.UTC)
with assert_setup_component(1, zone.DOMAIN):
zone_info = {
"name": "Home",
"latitude": 1,
"longitude": 2,
"radius": 250,
"passive": False,
}
await async_setup_component(hass, zone.DOMAIN, {"zone": zone_info})
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
scanner.come_home("dev1")
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=register_time,
):
with assert_setup_component(1, device_tracker.DOMAIN):
assert await async_setup_component(
hass,
device_tracker.DOMAIN,
{
device_tracker.DOMAIN: {
CONF_PLATFORM: "test",
device_tracker.CONF_CONSIDER_HOME: 59,
}
},
)
await hass.async_block_till_done()
state = hass.states.get("device_tracker.dev1")
attrs = state.attributes
assert STATE_HOME == state.state
assert state.object_id == "dev1"
assert state.name == "dev1"
assert attrs.get("friendly_name") == "dev1"
assert attrs.get("latitude") == 1
assert attrs.get("longitude") == 2
assert attrs.get("gps_accuracy") == 0
assert attrs.get("source_type") == device_tracker.SOURCE_TYPE_ROUTER
scanner.leave_home("dev1")
with patch(
"homeassistant.components.device_tracker.legacy.dt_util.utcnow",
return_value=scan_time,
):
async_fire_time_changed(hass, scan_time)
await hass.async_block_till_done()
state = hass.states.get("device_tracker.dev1")
attrs = state.attributes
assert STATE_NOT_HOME == state.state
assert state.object_id == "dev1"
assert state.name == "dev1"
assert attrs.get("friendly_name") == "dev1"
assert attrs.get("latitude") is None
assert attrs.get("longitude") is None
assert attrs.get("gps_accuracy") is None
assert attrs.get("source_type") == device_tracker.SOURCE_TYPE_ROUTER
@patch("homeassistant.components.device_tracker.const.LOGGER.warning")
async def test_see_failures(mock_warning, hass, mock_device_tracker_conf):
"""Test that the device tracker see failures."""
devices = mock_device_tracker_conf
tracker = legacy.DeviceTracker(hass, timedelta(seconds=60), 0, {}, [])
# MAC is not a string (but added)
await tracker.async_see(mac=567, host_name="Number MAC")
# No device id or MAC(not added)
with pytest.raises(HomeAssistantError):
await tracker.async_see()
assert mock_warning.call_count == 0
# Ignore gps on invalid GPS (both added & warnings)
await tracker.async_see(mac="mac_1_bad_gps", gps=1)
await tracker.async_see(mac="mac_2_bad_gps", gps=[1])
await tracker.async_see(mac="mac_3_bad_gps", gps="gps")
await hass.async_block_till_done()
assert mock_warning.call_count == 3
assert len(devices) == 4
async def test_async_added_to_hass(hass):
"""Test restoring state."""
attr = {
ATTR_LONGITUDE: 18,
ATTR_LATITUDE: -33,
const.ATTR_SOURCE_TYPE: "gps",
ATTR_GPS_ACCURACY: 2,
const.ATTR_BATTERY: 100,
}
mock_restore_cache(hass, [State("device_tracker.jk", "home", attr)])
path = hass.config.path(legacy.YAML_DEVICES)
files = {path: "jk:\n name: JK Phone\n track: True"}
with patch_yaml_files(files):
assert await async_setup_component(hass, device_tracker.DOMAIN, {})
state = hass.states.get("device_tracker.jk")
assert state
assert state.state == "home"
for key, val in attr.items():
atr = state.attributes.get(key)
assert atr == val, "{}={} expected: {}".format(key, atr, val)
async def test_bad_platform(hass):
"""Test bad platform."""
config = {"device_tracker": [{"platform": "bad_platform"}]}
with assert_setup_component(0, device_tracker.DOMAIN):
assert await async_setup_component(hass, device_tracker.DOMAIN, config)
async def test_adding_unknown_device_to_config(mock_device_tracker_conf, hass):
"""Test the adding of unknown devices to configuration file."""
scanner = getattr(hass.components, "test.device_tracker").SCANNER
scanner.reset()
scanner.come_home("DEV1")
await async_setup_component(
hass, device_tracker.DOMAIN, {device_tracker.DOMAIN: {CONF_PLATFORM: "test"}}
)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
device = mock_device_tracker_conf[0]
assert device.dev_id == "dev1"
assert device.track
async def test_picture_and_icon_on_see_discovery(mock_device_tracker_conf, hass):
"""Test that picture and icon are set in initial see."""
tracker = legacy.DeviceTracker(hass, timedelta(seconds=60), False, {}, [])
await tracker.async_see(dev_id=11, picture="pic_url", icon="mdi:icon")
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].icon == "mdi:icon"
assert mock_device_tracker_conf[0].entity_picture == "pic_url"
async def test_default_hide_if_away_is_used(mock_device_tracker_conf, hass):
"""Test that default track_new is used."""
tracker = legacy.DeviceTracker(
hass, timedelta(seconds=60), False, {device_tracker.CONF_AWAY_HIDE: True}, []
)
await tracker.async_see(dev_id=12)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].away_hide
async def test_backward_compatibility_for_track_new(mock_device_tracker_conf, hass):
"""Test backward compatibility for track new."""
tracker = legacy.DeviceTracker(
hass, timedelta(seconds=60), False, {device_tracker.CONF_TRACK_NEW: True}, []
)
await tracker.async_see(dev_id=13)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].track is False
async def test_old_style_track_new_is_skipped(mock_device_tracker_conf, hass):
"""Test old style config is skipped."""
tracker = legacy.DeviceTracker(
hass, timedelta(seconds=60), None, {device_tracker.CONF_TRACK_NEW: False}, []
)
await tracker.async_see(dev_id=14)
await hass.async_block_till_done()
assert len(mock_device_tracker_conf) == 1
assert mock_device_tracker_conf[0].track is False
def test_see_schema_allowing_ios_calls():
"""Test SEE service schema allows extra keys.
    Temporary workaround because the iOS app sends incorrect data.
"""
device_tracker.SERVICE_SEE_PAYLOAD_SCHEMA(
{
"dev_id": "Test",
"battery": 35,
"battery_status": "Not Charging",
"gps": [10.0, 10.0],
"gps_accuracy": 300,
"hostname": "beer",
}
)
|
{
"content_hash": "b100f1d73e9079a64ea92c336e46671f",
"timestamp": "",
"source": "github",
"line_count": 682,
"max_line_length": 88,
"avg_line_length": 33.74486803519061,
"alnum_prop": 0.6429564612844355,
"repo_name": "joopert/home-assistant",
"id": "e839a88536eeb9f09e54416c2c883a8dd5d3a179",
"size": "23014",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/components/device_tracker/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18670593"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
from gevent import monkey
monkey.patch_all()
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
from worker import OceanWorker
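# Usage note (illustrative, not from the original file): this script targets
# Python 2 (see reload(sys)/setdefaultencoding above) and is typically run as
#   python runworker.py --production
# or
#   python runworker.py --development
# With neither flag, environment=None is passed through to OceanWorker.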
if __name__ == "__main__":
env = None
if len(sys.argv) > 1:
for arg in sys.argv[1:]:
if arg == '--production':
env = 'production'
elif arg == '--development':
env = 'development'
worker = OceanWorker(environment=env)
try:
worker.start()
except KeyboardInterrupt:
worker.stop()
|
{
"content_hash": "50c77df028aff779fa07103dffce0c42",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 41,
"avg_line_length": 19.11111111111111,
"alnum_prop": 0.562015503875969,
"repo_name": "ddinsight/dd-streamworks",
"id": "76ceaafb8b1b437825bd27ec50a99cdeaf7f3e48",
"size": "1133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stream_worker/runworker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "249851"
},
{
"name": "Shell",
"bytes": "546"
}
],
"symlink_target": ""
}
|
"""Functionality for manipulating FHIRPath expressions."""
import copy
import dataclasses
import decimal
from typing import Any, List, Optional, Set, cast
from google.cloud import bigquery
from google.protobuf import message
from google.fhir.core.proto import fhirpath_replacement_list_pb2
from google.fhir.core import fhir_errors
from google.fhir.core.fhir_path import _ast
from google.fhir.core.fhir_path import _fhir_path_data_types
from google.fhir.core.fhir_path import _fhir_path_to_sql_functions
from google.fhir.core.fhir_path import _navigation
from google.fhir.core.fhir_path import _semant
from google.fhir.core.fhir_path import _sql_data_types
from google.fhir.core.fhir_path import _utils
from google.fhir.core.fhir_path import fhir_path_options
from google.fhir.core.utils import proto_utils
# TODO(b/201107372): Update FHIR-agnostic types to a protocol.
StructureDefinition = message.Message
ElementDefinition = message.Message
Constraint = message.Message
# See more at: https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md
_PRIMITIVE_TO_STANDARD_SQL_MAP = {
'base64Binary': _sql_data_types.String,
'boolean': _sql_data_types.Boolean,
'code': _sql_data_types.String,
'date': _sql_data_types.String,
'dateTime': _sql_data_types.String,
'decimal': _sql_data_types.Numeric,
'id': _sql_data_types.String,
'instant': _sql_data_types.String,
'integer': _sql_data_types.Int64,
'markdown': _sql_data_types.String,
'oid': _sql_data_types.String,
'positiveInt': _sql_data_types.Int64,
'string': _sql_data_types.String,
'time': _sql_data_types.String,
'unsignedInt': _sql_data_types.Int64,
'uri': _sql_data_types.String,
'xhtml': _sql_data_types.String,
}
# See more at:
# * https://github.com/FHIR/sql-on-fhir/blob/master/sql-on-fhir.md
# * https://www.hl7.org/fhir/fhirpath.html#types
_SYSTEM_PRIMITIVE_TO_STANDARD_SQL_MAP = {
'http://hl7.org/fhirpath/System.Boolean': _sql_data_types.Boolean,
'http://hl7.org/fhirpath/System.Date': _sql_data_types.String,
'http://hl7.org/fhirpath/System.DateTime': _sql_data_types.String,
'http://hl7.org/fhirpath/System.Decimal': _sql_data_types.Numeric,
'http://hl7.org/fhirpath/System.Integer': _sql_data_types.Int64,
'http://hl7.org/fhirpath/System.Quantity': _sql_data_types.OpaqueStruct,
'http://hl7.org/fhirpath/System.String': _sql_data_types.String,
'http://hl7.org/fhirpath/System.Time': _sql_data_types.String,
}
def _escape_identifier(identifier_value: str) -> str:
"""Returns the value surrounded by backticks if it is a keyword."""
# Keywords are case-insensitive
if identifier_value.upper() in _sql_data_types.STANDARD_SQL_KEYWORDS:
return f'`{identifier_value}`'
return identifier_value # No-op
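# Illustrative behavior of _escape_identifier above (assuming 'SELECT' is in
# _sql_data_types.STANDARD_SQL_KEYWORDS; the inputs are hypothetical):
#   _escape_identifier('SELECT')  -> '`SELECT`'
#   _escape_identifier('telecom') -> 'telecom'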
def _get_analytic_path(element_definition: ElementDefinition) -> str:
"""Returns the identifying dot-separated (`.`) analytic path of the element.
The `analytic path` is:
- If the given element is a slice on an extension, it returns the element id
with the `extension` part discarded.
(e.g: if slice element id is `Foo.extension:slice`, it returns `Foo.slice`)
- Else, the element.path attribute.
Args:
element_definition: The element definition that we are operating on.
"""
if _utils.is_slice_on_extension(element_definition):
initial_path: str = cast(Any, element_definition).id.value
return initial_path.replace('extension:', '')
if not proto_utils.field_is_set(element_definition, 'path'):
raise ValueError(
f'Required field "path" is not set for {element_definition}.')
return cast(Any, element_definition).path.value
def _last_path_token(element_definition: ElementDefinition) -> str:
"""Returns `element_definition`'s last path token less the resource type.
For example:
* "Foo" returns "" (empty string)
* "Foo.bar" returns "bar"
* "Foo.bar.bats" returns "bats"
Args:
element_definition: The `ElementDefinition` whose relative path to return.
"""
path = _get_analytic_path(element_definition)
components_less_resource = path.split('.')[1:]
return components_less_resource[-1] if components_less_resource else ''
def _is_type(element_definition: ElementDefinition, type_code: str) -> bool:
"""Returns `True` if `element_definition` is of type, `type_code`."""
type_codes = _utils.element_type_codes(element_definition)
if len(type_codes) != 1:
return False
return type_codes[0] == type_code
def _is_primitive_typecode(type_code: str) -> bool:
"""Returns True if the given typecode is primitive. False otherwise."""
return (
type_code in _PRIMITIVE_TO_STANDARD_SQL_MAP or
# Ids are a special case of primitive that have their type code equal to
# 'http://hl7.org/fhirpath/System.String'.
type_code == 'http://hl7.org/fhirpath/System.String')
@dataclasses.dataclass
class SqlGenerationOptions:
"""Used by FhirProfileStandardSqlEncoder to define optional settings.
Attributes:
skip_keys: A set of constraint keys that should be skipped during encoding.
add_primitive_regexes: Whether or not to add constraints requiring primitive
fields to match their corresponding regex.
add_value_set_bindings: Whether or not to add constraints enforcing
      membership of codes in the value sets defined by the implementation guide.
expr_replace_list: A list that specifies fhir path expressions to be
replaced. It also specifies what they should be replaced with.
value_set_codes_table: The name of the database table containing value set
code definitions. Used when building SQL for memberOf expressions.
"""
skip_keys: Set[str] = dataclasses.field(default_factory=set)
add_primitive_regexes: bool = False
expr_replace_list: fhirpath_replacement_list_pb2.FHIRPathReplacementList = (
fhirpath_replacement_list_pb2.FHIRPathReplacementList())
add_value_set_bindings: bool = False
  value_set_codes_table: Optional[bigquery.TableReference] = None
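# Illustrative construction of the options above (the key and flag values are
# hypothetical, not taken from this module):
#   options = SqlGenerationOptions(skip_keys={'my-constraint-key'},
#                                  add_primitive_regexes=True)
# The instance is then passed to FhirPathStandardSqlEncoder below.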
class FhirPathStandardSqlEncoder(_ast.FhirPathAstBaseVisitor):
"""Encodes a FHIRPath Constraint into a Standard SQL expression."""
def __init__(
self,
structure_definitions: List[StructureDefinition],
options: Optional[SqlGenerationOptions] = None,
validation_options: Optional[
fhir_path_options.SqlValidationOptions] = None,
) -> None:
"""Creates a new instance of `FhirPathStandardSqlEncoder`.
Args:
structure_definitions: The list of `StructureDefinition`s comprising the
FHIR resource "graph" for traversal and encoding of constraints.
options: Optional settings for influencing SQL Generation.
validation_options: Optional settings for influencing validation behavior.
"""
self._env = _navigation._Environment(structure_definitions)
self._options = options or SqlGenerationOptions()
self._semantic_analyzer = _semant.FhirPathSemanticAnalyzer(
self._env, validation_options=validation_options)
# TODO(b/194290588): Perform recursive type inference on `STRUCT`s.
def _get_standard_sql_data_type(
self, element_definition: ElementDefinition
) -> _sql_data_types.StandardSqlDataType:
"""Return the Standard SQL data type describing the `ElementDefinition`.
Complex resources are returned as `OpaqueStruct` instances (no visibility
into field type).
Args:
element_definition: The `ElementDefinition` whose type to return.
Returns:
A Standard SQL data type describing the `ElementDefinition`.
"""
type_codes = _utils.element_type_codes(element_definition)
if len(type_codes) == 1:
uri_value: str = type_codes[0]
if uri_value in _PRIMITIVE_TO_STANDARD_SQL_MAP:
return _PRIMITIVE_TO_STANDARD_SQL_MAP[uri_value]
if uri_value in _SYSTEM_PRIMITIVE_TO_STANDARD_SQL_MAP:
return _SYSTEM_PRIMITIVE_TO_STANDARD_SQL_MAP[uri_value]
return _sql_data_types.OpaqueStruct # Empty `STRUCT`
def encode(self,
*,
structure_definition: StructureDefinition,
fhir_path_expression: str,
element_definition: Optional[ElementDefinition] = None,
select_scalars_as_array: bool = True) -> str:
"""Returns a Standard SQL encoding of a FHIRPath expression.
If select_scalars_as_array is True, the resulting Standard SQL encoding
always returns a top-level `ARRAY`, whose elements are non-`NULL`. Otherwise
the resulting SQL will attempt to return a scalar when possible and only
return an `ARRAY` for actual collections.
Args:
structure_definition: The containing type of `element_definition`.
fhir_path_expression: A fluent-style FHIRPath expression, e.g.:
`foo.bar.exists()`.
element_definition: The `ElementDefinition` that the
`fhir_path_expression` is relative to. If this is None, the root element
definition is used.
select_scalars_as_array: When True, always builds SQL selecting results in
an array. When False, attempts to build SQL returning scalars where
possible.
Returns:
A Standard SQL representation of the provided FHIRPath expression.
Raises:
ValueError: In the event that the provided `input_str` was syntactically
invalid FHIRPath that failed during lexing/parsing.
TypeError: In the event that errors occur during semantic analysis.
Meaning that the `input_str` was semantically invalid FHIRPath.
"""
ast = _ast.build_fhir_path_ast(fhir_path_expression)
if element_definition is None:
element_definition = _utils.get_root_element_definition(
structure_definition)
semant_error_reporter = fhir_errors.ListErrorReporter()
self._semantic_analyzer.add_semantic_annotations(
ast,
semant_error_reporter,
structure_definition,
element_definition,
)
if semant_error_reporter.errors:
semantic_errors = '.\n'.join(semant_error_reporter.errors)
raise TypeError('Unexpected errors during semantic analysis:\n%s' %
semantic_errors)
walker = _navigation.FhirStructureDefinitionWalker(
self._env,
structure_definition,
element_definition,
)
result = self.visit(ast, walker=walker)
if select_scalars_as_array or isinstance(ast.data_type,
_fhir_path_data_types.Collection):
return (f'ARRAY(SELECT {result.sql_alias}\n'
f'FROM {result.to_subquery()}\n'
f'WHERE {result.sql_alias} IS NOT NULL)')
else:
# Parenthesize raw SELECT so it can plug in anywhere an expression can.
return f'({result})'
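  # Illustrative call of encode() above (the structure definition and the
  # FHIRPath expression are hypothetical):
  #   encoder = FhirPathStandardSqlEncoder(structure_definitions)
  #   sql = encoder.encode(structure_definition=patient_structdef,
  #                        fhir_path_expression='telecom.exists()')
  # which yields an ARRAY(SELECT ...) expression as described in the docstring.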
def validate(self, structure_definition: StructureDefinition,
element_definition: ElementDefinition,
fhir_path_expression: str) -> fhir_errors.ListErrorReporter:
"""Validates the given FHIR path expression.
Validates a given FHIR path expression in the context of a structure
definition and element definition.
Args:
structure_definition: The containing type of `element_definition`.
element_definition: The `ElementDefinition` that the
`fhir_path_expression` is relative to.
fhir_path_expression: A fluent-style FHIRPath expression, e.g.:
`foo.bar.exists()`.
Returns:
An error reporter that will be populated with any errors / warnings
encountered.
"""
error_reporter = fhir_errors.ListErrorReporter()
try:
ast = _ast.build_fhir_path_ast(fhir_path_expression)
self._semantic_analyzer.add_semantic_annotations(
ast,
error_reporter,
structure_definition,
element_definition,
)
except ValueError as e:
error_reporter.report_conversion_error(
cast(Any, element_definition).path.value, str(e))
return error_reporter
def visit_literal(self, literal: _ast.Literal,
**unused_kwargs: Any) -> _sql_data_types.RawExpression:
"""Translates a FHIRPath literal to Standard SQL."""
if literal.value is None:
sql_value = 'NULL'
sql_data_type = _sql_data_types.Undefined
elif isinstance(literal.value, bool):
sql_value = str(literal).upper()
sql_data_type = _sql_data_types.Boolean
elif literal.is_date_type:
# Unfortunately, _ast.Literal does not differentiate how the Timestamp was
# given so it's nontrivial to parse the string correctly.
sql_value = f"'{literal.value}'"
sql_data_type = _sql_data_types.String
elif isinstance(literal.value, (str, _ast.Quantity)):
sql_value = f"'{literal.value}'" # Quote string literals for SQL
sql_data_type = _sql_data_types.String
elif isinstance(literal.value, int):
sql_value = str(literal)
sql_data_type = _sql_data_types.Int64
elif isinstance(literal.value, decimal.Decimal):
sql_value = str(literal)
sql_data_type = _sql_data_types.Numeric
else:
# Semantic analysis ensures that literal has to be one of the above cases.
# But we error out here in case we enter an illegal state.
raise ValueError(f'Unsupported literal value: {literal}.')
return _sql_data_types.RawExpression(
sql_value,
_sql_data_type=sql_data_type,
_sql_alias='literal_',
)
def visit_identifier(
self, identifier: _ast.Identifier, *,
walker: _navigation.FhirStructureDefinitionWalker
) -> _sql_data_types.IdentifierSelect:
"""Translates a FHIRPath member identifier to Standard SQL."""
# TODO(b/244184211): Handle "special" identifiers
# Advance the message context.
if identifier.value == '$this':
# If the identifier string is `$this`, then we don't have to advance the
# message context because `$this` is just a reference to the current
# identifier.
raw_identifier_str = _last_path_token(walker.element)
else:
walker.step(identifier.value)
raw_identifier_str = identifier.value
# Map to Standard SQL type. Note that we never map to a type of `ARRAY`,
# as the member encoding flattens any `ARRAY` members.
sql_data_type = self._get_standard_sql_data_type(walker.element)
identifier_str = _escape_identifier(raw_identifier_str)
if _utils.is_repeated_element(walker.element): # Array
# If the identifier is `$this`, we assume that the repeated field has been
# unnested upstream so we only need to reference it with its alias:
# `{}_element_`.
if identifier.value == '$this':
sql_alias = f'{raw_identifier_str}_element_'
return _sql_data_types.IdentifierSelect(
select_part=_sql_data_types.Identifier(sql_alias, sql_data_type),
from_part=None,
)
else:
sql_alias = f'{raw_identifier_str}_element_'
# When UNNEST-ing a repeated field, we always generate an offset column
# as well. If unused by the overall query, the expectation is that the
# BigQuery query optimizer will be able to detect the unused column and
# ignore it.
return _sql_data_types.IdentifierSelect(
select_part=_sql_data_types.Identifier(sql_alias, sql_data_type),
from_part=f'UNNEST({identifier_str}) AS {sql_alias} ' +
'WITH OFFSET AS element_offset',
)
else: # Scalar
return _sql_data_types.IdentifierSelect(
select_part=_sql_data_types.Identifier(identifier_str, sql_data_type),
from_part=None,
)
def visit_indexer(
self, indexer: _ast.Indexer, *,
walker: _navigation.FhirStructureDefinitionWalker
) -> _sql_data_types.Select:
"""Translates a FHIRPath indexer expression to Standard SQL.
Args:
indexer: The AST `_Indexer` node.
walker: A `FhirStructureDefinitionWalker` for traversing the underlying
FHIR implementation graph.
Returns:
A compiled Standard SQL expression.
Raises:
TypeError in the event that the `indexer.index` attribute is not of type
`Int64`.
"""
collection_result = self.visit(indexer.collection, walker=copy.copy(walker))
# Semantic analysis verifies that this is always an Integer.
index_result = self.visit(indexer.index, walker=copy.copy(walker))
# Intermediate indexed table subquery.
indexed_collection = ('SELECT ROW_NUMBER() OVER() AS row_,\n'
f'{collection_result.sql_alias}\n'
f'FROM {collection_result.to_subquery()}')
# Construct SQL expression; index must be a single integer per the FHIRPath
# grammar, so we can leverage a scalar subquery.
sql_alias = f'indexed_{collection_result.sql_alias}'
return _sql_data_types.Select(
select_part=_sql_data_types.Identifier(
collection_result.sql_alias,
collection_result.sql_data_type,
_sql_alias=sql_alias,
),
from_part=f'({indexed_collection}) AS inner_tbl',
where_part=f'(inner_tbl.row_ - 1) = {index_result.as_operand()}',
)
def visit_arithmetic(
self, arithmetic: _ast.Arithmetic, *,
walker: _navigation.FhirStructureDefinitionWalker
) -> _sql_data_types.Select:
"""Translates a FHIRPath arithmetic expression to Standard SQL.
Each operand is expected to be a collection of a single element. Both
operands must be of the same type, or of compatible types according to the
rules of implicit conversion.
Args:
arithmetic: The AST `_Arithmetic` node.
walker: A `FhirStructureDefinitionWalker` for traversing the underlying
FHIR implementation graph.
Returns:
A compiled Standard SQL expression.
Raises:
ValueError in the event that the generated Standard SQL represents an
incompatible arithmetic expression.
"""
lhs_result = self.visit(arithmetic.lhs, walker=copy.copy(walker))
rhs_result = self.visit(arithmetic.rhs, walker=copy.copy(walker))
sql_data_type = _sql_data_types.coerce(lhs_result.sql_data_type,
rhs_result.sql_data_type)
# Extract the values of LHS and RHS to be used as scalar subqueries.
lhs_subquery = lhs_result.as_operand()
rhs_subquery = rhs_result.as_operand()
# TODO(b/196238279): Handle <string> + <string> when either operand is empty
if sql_data_type == _sql_data_types.String:
sql_value = f'CONCAT({lhs_subquery}, {rhs_subquery})'
elif arithmetic.op == _ast.Arithmetic.Op.MODULO:
sql_value = f'MOD({lhs_subquery}, {rhs_subquery})'
elif arithmetic.op == _ast.Arithmetic.Op.TRUNCATED_DIVISION:
sql_value = f'DIV({lhs_subquery}, {rhs_subquery})'
else: # +, -, *, /
sql_value = f'({lhs_subquery} {arithmetic.op} {rhs_subquery})'
sql_alias = 'arith_'
return _sql_data_types.Select(
select_part=_sql_data_types.RawExpression(
sql_value, _sql_data_type=sql_data_type, _sql_alias=sql_alias),
from_part=None)
def visit_type_expression(
self, type_expression: _ast.TypeExpression, *,
walker: _navigation.FhirStructureDefinitionWalker
) -> _sql_data_types.StandardSqlExpression:
raise NotImplementedError('`visit_type_expression` is not yet implemented.')
# TODO(b/191895864): Equality relation against an empty collection will be
# truth-y, which is problematic for equals, but not equivalent-to.
# TODO(b/191896705): DateTimes are treated as `STRING`s in SQL; ensure
# timezone of 'Z' is respected/treated as +00:00.
# TODO(b/191895721): Verify equivalence order-dependence (documentation says
# it is *not* order-dependent, but HL7 JS implementation *is*).
def visit_equality(
self, relation: _ast.EqualityRelation, *,
walker: _navigation.FhirStructureDefinitionWalker
) -> _sql_data_types.Select:
"""Returns `TRUE` if the left collection is equal/equivalent to the right.
See more at: http://hl7.org/fhirpath/#equality.
Args:
relation: The AST `EqualityRelation` node.
walker: A `FhirStructureDefinitionWalker` for traversing the underlying
FHIR implementation graph.
Returns:
A compiled Standard SQL expression.
Raises:
ValueError in the event that the generated Standard SQL represents an
unsupported equality relation.
"""
lhs_result = self.visit(relation.lhs, walker=copy.copy(walker))
rhs_result = self.visit(relation.rhs, walker=copy.copy(walker))
# Semantic analysis ensures that the lhs and rhs are either directly
# comparable or implicitly comparable to each other.
if (relation.op == _ast.EqualityRelation.Op.EQUAL or
relation.op == _ast.EqualityRelation.Op.EQUIVALENT):
collection_check_func_name = 'NOT EXISTS'
scalar_check_op = '='
else: # NOT_*
collection_check_func_name = 'EXISTS'
scalar_check_op = '!='
sql_alias = 'eq_'
sql_data_type = _sql_data_types.Boolean
# Both sides are scalars.
if (not isinstance(relation.lhs.data_type, _fhir_path_data_types.Collection)
and not isinstance(relation.rhs.data_type,
_fhir_path_data_types.Collection)):
# Use the simpler query.
return _sql_data_types.Select(
select_part=_sql_data_types.RawExpression(
f'({lhs_result.as_operand()} '
f'{scalar_check_op} '
f'{rhs_result.as_operand()})',
_sql_data_type=sql_data_type,
_sql_alias=sql_alias),
from_part=None)
else:
sql_expr = ('SELECT lhs_.*\n'
'FROM (SELECT ROW_NUMBER() OVER() AS row_, '
f'{lhs_result.sql_alias}\n'
f'FROM {lhs_result.to_subquery()}) AS lhs_\n'
'EXCEPT DISTINCT\n'
'SELECT rhs_.*\n'
'FROM (SELECT ROW_NUMBER() OVER() AS row_, '
f'{rhs_result.sql_alias}\n'
f'FROM {rhs_result.to_subquery()}) AS rhs_')
return _sql_data_types.Select(
select_part=_sql_data_types.FunctionCall(
collection_check_func_name, (_sql_data_types.RawExpression(
sql_expr, _sql_data_type=_sql_data_types.Int64),),
_sql_data_type=sql_data_type,
_sql_alias=sql_alias),
from_part=None)
def visit_comparison(
self, comparison: _ast.Comparison, *,
walker: _navigation.FhirStructureDefinitionWalker
) -> _sql_data_types.Select:
"""Translates a FHIRPath comparison to Standard SQL.
Each operand is expected to be a collection of a single element. Operands
can be strings, integers, decimals, dates, datetimes, and times. Comparison
will perform implicit conversion between applicable types.
Args:
comparison: The FHIRPath AST `Comparison` node.
walker: A `FhirStructureDefinitionWalker` for traversing the underlying
FHIR implementation graph.
Returns:
A compiled Standard SQL expression.
Raises:
TypeError: In the event that coercion fails between the operands, or that
the resulting type is a `STRUCT`.
"""
lhs_result = self.visit(comparison.lhs, walker=copy.copy(walker))
rhs_result = self.visit(comparison.rhs, walker=copy.copy(walker))
# TODO(b/196239030): Leverage semantic analysis type information to make
# more nuanced decision (e.g. if Quantity, certain operations can be
# supported).
type_ = _sql_data_types.coerce(lhs_result.sql_data_type,
rhs_result.sql_data_type)
if isinstance(type_, _sql_data_types.Struct):
raise TypeError('Unsupported `STRUCT` logical comparison between '
f'{lhs_result} {comparison.op} {rhs_result}.')
# Extract the values of LHS and RHS to be used as scalar subqueries.
lhs_subquery = lhs_result.as_operand()
rhs_subquery = rhs_result.as_operand()
# A check in semantic analysis prevents us from reaching this code with
    # incompatible types.
sql_value = f'({lhs_subquery} {comparison.op} {rhs_subquery})'
sql_alias = 'comparison_'
return _sql_data_types.Select(
select_part=_sql_data_types.RawExpression(
sql_value,
_sql_data_type=_sql_data_types.Boolean,
_sql_alias=sql_alias),
from_part=None)
def visit_boolean_logic(
self, boolean_logic: _ast.BooleanLogic, *,
walker: _navigation.FhirStructureDefinitionWalker
) -> _sql_data_types.Select:
"""Translates a FHIRPath Boolean logic operation to Standard SQL.
Note that evaluation for Boolean logic is only supported for Boolean
operands of scalar cardinality.
Args:
boolean_logic: The FHIRPath AST `BooleanLogic` node.
walker: A `FhirStructureDefinitionWalker` for traversing the underlying
FHIR implementation graph.
Returns:
A compiled Standard SQL expression.
Raises:
TypeError: In the event that either operand does not evaluate to a `BOOL`.
"""
lhs_result = self.visit(boolean_logic.lhs, walker=copy.copy(walker))
rhs_result = self.visit(boolean_logic.rhs, walker=copy.copy(walker))
# Extract boolean values from both sides if needed.
if lhs_result.sql_data_type != _sql_data_types.Boolean:
lhs_result = lhs_result.is_not_null()
if rhs_result.sql_data_type != _sql_data_types.Boolean:
rhs_result = rhs_result.is_not_null()
# Extract the values of LHS and RHS to be used as scalar subqueries.
lhs_subquery = lhs_result.as_operand()
rhs_subquery = rhs_result.as_operand()
if boolean_logic.op == _ast.BooleanLogic.Op.IMPLIES:
sql_value = f'(NOT {lhs_subquery} OR {rhs_subquery})'
elif boolean_logic.op == _ast.BooleanLogic.Op.XOR:
sql_value = f'({lhs_subquery} <> {rhs_subquery})'
else: # AND, OR
sql_value = f'({lhs_subquery} {boolean_logic.op.upper()} {rhs_subquery})'
sql_alias = 'logic_'
return _sql_data_types.Select(
select_part=_sql_data_types.RawExpression(
sql_value,
_sql_data_type=_sql_data_types.Boolean,
_sql_alias=sql_alias),
from_part=None)
def visit_membership(
self, relation: _ast.MembershipRelation, *,
walker: _navigation.FhirStructureDefinitionWalker
) -> _sql_data_types.Select:
"""Translates a FHIRPath membership relation to Standard SQL.
For the `IN` relation, the LHS operand is assumed to be a collection of a
single value. For 'CONTAINS', the RHS operand is assumed to be a collection
of a single value.
Args:
relation: The FHIRPath AST `MembershipRelation` node.
walker: A `FhirStructureDefinitionWalker` for traversing the underlying
FHIR implementation graph.
Returns:
A compiled Standard SQL expression.
"""
lhs_result = self.visit(relation.lhs, walker=copy.copy(walker))
rhs_result = self.visit(relation.rhs, walker=copy.copy(walker))
# SELECT (<lhs>) IN(<rhs>) AS mem_
# Where relation.op \in {IN, CONTAINS}; `CONTAINS` is the converse of `IN`
in_lhs = (
lhs_result
if relation.op == _ast.MembershipRelation.Op.IN else rhs_result)
in_rhs = (
rhs_result
if relation.op == _ast.MembershipRelation.Op.IN else lhs_result)
sql_expr = (f'({in_lhs.as_operand()})\n' f'IN ({in_rhs.as_operand()})')
return _sql_data_types.Select(
select_part=_sql_data_types.RawExpression(
sql_expr,
_sql_data_type=_sql_data_types.Boolean,
_sql_alias='mem_',
),
from_part=None)
def visit_union(
self, union: _ast.UnionOp, *,
walker: _navigation.FhirStructureDefinitionWalker
) -> _sql_data_types.UnionExpression:
"""Merge two collections into a single *distinct* collection.
Args:
union: The FHIRPath AST `UnionOp` node.
walker: A `FhirStructureDefinitionWalker` for traversing the underlying
FHIR implementation graph.
Returns:
A compiled Standard SQL expression.
"""
lhs_result = self.visit(union.lhs, walker=copy.copy(walker))
rhs_result = self.visit(union.rhs, walker=copy.copy(walker))
# Supported in FHIRPath, but currently generates invalid Standard SQL.
if (isinstance(lhs_result.sql_data_type, _sql_data_types.Struct) or
isinstance(rhs_result.sql_data_type, _sql_data_types.Struct)):
raise TypeError(
f'Unsupported `STRUCT` union between {lhs_result}, {rhs_result}.')
sql_alias = 'union_'
lhs = _sql_data_types.Select(
select_part=_sql_data_types.Identifier(
('lhs_', lhs_result.sql_alias),
_sql_alias=sql_alias,
_sql_data_type=lhs_result.sql_data_type),
from_part=f'{lhs_result.to_subquery()} AS lhs_')
rhs = _sql_data_types.Select(
select_part=_sql_data_types.Identifier(
('rhs_', rhs_result.sql_alias),
_sql_alias=sql_alias,
_sql_data_type=rhs_result.sql_data_type),
from_part=f'{rhs_result.to_subquery()} AS rhs_',
)
return lhs.union(rhs, distinct=True)
def visit_polarity(
self, polarity: _ast.Polarity, *,
walker: _navigation.FhirStructureDefinitionWalker
) -> _sql_data_types.Select:
"""Translates FHIRPath unary polarity (+/-) to Standard SQL."""
operand_result = self.visit(polarity.operand, walker=walker)
sql_expr = f'{polarity.op}{operand_result.as_operand()}'
sql_alias = 'pol_'
# For consistency with visit_polarity in FhirPathCompilerVisitor.
if isinstance(polarity.operand, _ast.Literal):
sql_alias = 'literal_'
return _sql_data_types.Select(
select_part=_sql_data_types.RawExpression(
sql_expr,
_sql_data_type=operand_result.sql_data_type,
_sql_alias=sql_alias,
),
from_part=None)
def visit_invocation(
self, invocation: _ast.Invocation, *,
walker: _navigation.FhirStructureDefinitionWalker
) -> _sql_data_types.StandardSqlExpression:
"""Translates a FHIRPath invocation to Standard SQL."""
# Function invocation
if isinstance(invocation.rhs, _ast.Function):
return self.visit_function(
invocation.rhs, operand=invocation.lhs, walker=walker)
# Member invocation
    # TODO(b/244184211): Most of the RHS encoding is redundant, since we need to
# "stitch" it together with the LHS. Rework this.
# As is, we need to call visit on both lhs and rhs to increment the walker.
lhs_result = self.visit(invocation.lhs, walker=walker)
rhs_result = self.visit(invocation.rhs, walker=walker)
# RHS must always be an identifier. If repeated, then this is an ARRAY value
# which needs to be "unpacked" to a table. Semantic analysis should error
# out before this point if this is not the case.
rhs_identifier = str(invocation.rhs)
if _utils.is_repeated_element(walker.element):
# When UNNEST-ing a repeated field, we always generate an offset column as
# well. If unused by the overall query, the expectation is that the
# BigQuery query optimizer will be able to detect the unused column and
# ignore it.
return _sql_data_types.IdentifierSelect(
select_part=_sql_data_types.Identifier(rhs_result.sql_alias,
rhs_result.sql_data_type),
from_part=(
f'{lhs_result.to_subquery()},\n'
f'UNNEST({lhs_result.sql_alias}.{rhs_identifier}) '
f'AS {rhs_result.sql_alias} '
              # As mentioned above, always generate the offset column.
'WITH OFFSET AS element_offset'),
)
else:
# Append the rhs to the path chain being selected.
# Including the from & where clauses of the lhs.
return dataclasses.replace(
lhs_result,
select_part=lhs_result.select_part.dot(
rhs_identifier,
rhs_result.sql_data_type,
sql_alias=rhs_result.sql_alias,
))
def visit_function(
self,
function: _ast.Function,
*,
walker: _navigation.FhirStructureDefinitionWalker,
operand: Optional[_ast.Expression] = None) -> _sql_data_types.Select:
"""Translates a FHIRPath function to Standard SQL."""
# Encode the operand, if present, and potentially mutate the `ctx`
operand_result = (
self.visit(operand, walker=walker) if operand is not None else None)
# Encode each parameter with a shallow-copy of `ctx`
params_result = [
self.visit(p, walker=copy.copy(walker)) for p in function.params
]
# Semantic analysis should error out before here if an invalid function is
# used.
func = _fhir_path_to_sql_functions.FUNCTION_MAP.get(
function.identifier.value)
# If the function is ofType, propagate its chosen type to the walker.
if function.identifier.value == _ast.Function.Name.OF_TYPE:
walker.selected_choice_type = str(function.params[0])
if function.identifier.value == _ast.Function.Name.MEMBER_OF:
kwargs = {}
if self._options.value_set_codes_table is not None:
kwargs['value_set_codes_table'] = str(
self._options.value_set_codes_table)
return func(function, operand_result, params_result, **kwargs)
else:
return func(function, operand_result, params_result)
|
{
"content_hash": "b1b22842083b1399ade001b759527d1b",
"timestamp": "",
"source": "github",
"line_count": 830,
"max_line_length": 80,
"avg_line_length": 40.310843373493974,
"alnum_prop": 0.668300555920856,
"repo_name": "google/fhir-py",
"id": "6158c8dc40a6adc9fcd0c0ea45fadc26ff1c883d",
"size": "34035",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google-fhir-core/google/fhir/core/fhir_path/fhir_path.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1449008"
},
{
"name": "Shell",
"bytes": "1667"
}
],
"symlink_target": ""
}
|
from examples import email
e = email.Emailer()
email.Emailer.MakeAnnouncement("nobody@example.com")
e.Bonjour()
|
{
"content_hash": "26842b2ab03522d7cf96862a4345682c",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 52,
"avg_line_length": 22.6,
"alnum_prop": 0.7787610619469026,
"repo_name": "pombredanne/pytype",
"id": "49004e23c1665a2ba1e9191f9483e2d3989888b6",
"size": "774",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pytype/pytd/demo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1030401"
}
],
"symlink_target": ""
}
|
"""Ce fichier contient l'éditeur EdtChambres, détaillé plus bas."""
from primaires.interpreteur.editeur import Editeur
from primaires.interpreteur.editeur.env_objet import EnveloppeObjet
from secondaires.auberge.editeurs.aubedit.edt_chambre import EdtChambre
from primaires.format.fonctions import supprimer_accents
class EdtChambres(Editeur):
"""Contexte-éditeur des chambres d'une auberge."""
def __init__(self, pere, objet=None, attribut=None):
"""Constructeur de l'éditeur"""
Editeur.__init__(self, pere, objet, attribut)
self.ajouter_option("c", self.opt_creer_chambre)
self.ajouter_option("d", self.opt_supprimer_chambre)
def accueil(self):
"""Message d'accueil du contexte"""
auberge = self.objet
msg = "| |tit|" + "Édition des chambres de l'auberge {}".format(
auberge.cle).ljust(76)
msg += "|ff||\n" + self.opts.separateur + "\n"
msg += "Cet éditeur vous permet d'ajouter ou de supprimer des\n" \
"chambres dans cette auberge. Entrez simplement son " \
"numéro pour\nl'éditer.\n\n" \
"Options disponibles :\n" \
" |cmd|/c <numéro> <identifiant_de_salle>|ff| pour " \
"ajouter une chambre\n" \
" |cmd|/d <numéro>|ff| pour supprimer une chambre\n\n" \
"Exemplels :\n" \
"|ent|/c 1 zone:cle|ff|\n" \
"|ent|/c suite zone:cle|ff|\n" \
"|ent|/d 1|ff|\n" \
"(Notez que le numéro n'est pas nécessairement un nombre.\n\n"
msg += "Chambres définies :\n"
if len(auberge.chambres) == 0:
msg += "\n Aucune"
else:
chambres = sorted([c for c in auberge.chambres.values()],
key=lambda c: c.numero)
for chambre in chambres:
msg += "\n |ent|" + chambre.numero + "|ff|"
msg += " vers " + chambre.ident_salle
return msg
def opt_creer_chambre(self, arguments):
"""Ajoute une chambre.
Syntaxe :
/a <numéro> <ident_salle>
"""
arguments = arguments.lower()
auberge = self.objet
if arguments.strip() == "":
self.pere << "|err|Précisez |ent|un numéro|ff| et |ent|un " \
"identifiant de salle.|ff|"
return
try:
numero, ident = arguments.split(" ")
except ValueError:
self.pere << "|err|Syntaxe invalide : |ent|/a <numéro> " \
"<ident_salle>|ff|"
return
if numero in auberge.numero_chambres:
self.pere << "|err|Ce numéro est déjà utilisé.|ff|"
return
try:
salle = importeur.salle[ident]
except KeyError:
self.pere << "|err|La salle '{}' est introuvable.|ff|".format(
ident)
return
auberge.ajouter_chambre(numero, salle)
self.actualiser()
def opt_supprimer_chambre(self, arguments):
"""Supprime la chambre passée en paramètre.
Syntaxe :
/d <numéro>
"""
auberge = self.objet
chambre = auberge.get_chambre_avec_numero(arguments)
if chambre:
auberge.supprimer_chambre(chambre.ident_salle)
self.actualiser()
else:
self.pere << "|err|Chambre introuvable.|ff|"
def interpreter(self, msg):
"""Interprétation de l'éditeur"""
auberge = self.objet
chambre = auberge.get_chambre_avec_numero(msg)
if chambre:
enveloppe = EnveloppeObjet(EdtChambre, chambre)
enveloppe.parent = self
contexte = enveloppe.construire(self.pere)
self.migrer_contexte(contexte)
else:
self.pere << "|err|Chambre {} introuvable.|ff|".format(repr(msg))
|
{
"content_hash": "46e053a52180bb08ec8954c3916cc494",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 35.99082568807339,
"alnum_prop": 0.549324496558756,
"repo_name": "stormi/tsunami",
"id": "74143692d7c5b6e470626a14c3478b000f5ad467",
"size": "5512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/secondaires/auberge/editeurs/aubedit/edt_chambres.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
}
|
import base64
import datetime
import json
import re
from azurelinuxagent.common.utils import timeutil
from azurelinuxagent.common.utils.textutil import parse_doc, find, findall
from tests.protocol.HttpRequestPredicates import HttpRequestPredicates
from tests.tools import load_bin_data, load_data, MagicMock, Mock
from azurelinuxagent.common.protocol.imds import IMDS_ENDPOINT
from azurelinuxagent.common.exception import HttpError, ResourceGoneError
from azurelinuxagent.common.future import httpclient
from azurelinuxagent.common.utils.cryptutil import CryptUtil
DATA_FILE = {
"version_info": "wire/version_info.xml",
"goal_state": "wire/goal_state.xml",
"hosting_env": "wire/hosting_env.xml",
"shared_config": "wire/shared_config.xml",
"certs": "wire/certs.xml",
"ext_conf": "wire/ext_conf.xml",
"manifest": "wire/manifest.xml",
"ga_manifest": "wire/ga_manifest.xml",
"trans_prv": "wire/trans_prv",
"trans_cert": "wire/trans_cert",
"test_ext": "ext/sample_ext-1.3.0.zip",
"imds_info": "imds/valid.json",
"remote_access": None,
"in_vm_artifacts_profile": None,
"vm_settings": None,
"ETag": None
}
DATA_FILE_IN_VM_ARTIFACTS_PROFILE = DATA_FILE.copy()
DATA_FILE_IN_VM_ARTIFACTS_PROFILE["ext_conf"] = "wire/ext_conf_in_vm_artifacts_profile.xml"
DATA_FILE_IN_VM_ARTIFACTS_PROFILE["in_vm_artifacts_profile"] = "wire/in_vm_artifacts_profile.json"
DATA_FILE_IN_VM_META_DATA = DATA_FILE.copy()
DATA_FILE_IN_VM_META_DATA["ext_conf"] = "wire/ext_conf_in_vm_metadata.xml"
DATA_FILE_INVALID_VM_META_DATA = DATA_FILE.copy()
DATA_FILE_INVALID_VM_META_DATA["ext_conf"] = "wire/ext_conf_invalid_vm_metadata.xml"
DATA_FILE_NO_EXT = DATA_FILE.copy()
DATA_FILE_NO_EXT["ext_conf"] = "wire/ext_conf_no_extensions-block_blob.xml"
DATA_FILE_NOOP_GS = DATA_FILE.copy()
DATA_FILE_NOOP_GS["goal_state"] = "wire/goal_state_noop.xml"
DATA_FILE_NOOP_GS["ext_conf"] = None
DATA_FILE_EXT_NO_SETTINGS = DATA_FILE.copy()
DATA_FILE_EXT_NO_SETTINGS["ext_conf"] = "wire/ext_conf_no_settings.xml"
DATA_FILE_EXT_NO_PUBLIC = DATA_FILE.copy()
DATA_FILE_EXT_NO_PUBLIC["ext_conf"] = "wire/ext_conf_no_public.xml"
DATA_FILE_EXT_AUTOUPGRADE = DATA_FILE.copy()
DATA_FILE_EXT_AUTOUPGRADE["ext_conf"] = "wire/ext_conf_autoupgrade.xml"
DATA_FILE_EXT_INTERNALVERSION = DATA_FILE.copy()
DATA_FILE_EXT_INTERNALVERSION["ext_conf"] = "wire/ext_conf_internalversion.xml"
DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION = DATA_FILE.copy()
DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION["ext_conf"] = "wire/ext_conf_autoupgrade_internalversion.xml"
DATA_FILE_EXT_ROLLINGUPGRADE = DATA_FILE.copy()
DATA_FILE_EXT_ROLLINGUPGRADE["ext_conf"] = "wire/ext_conf_upgradeguid.xml"
DATA_FILE_EXT_SEQUENCING = DATA_FILE.copy()
DATA_FILE_EXT_SEQUENCING["ext_conf"] = "wire/ext_conf_sequencing.xml"
DATA_FILE_EXT_ADDITIONAL_LOCATIONS = DATA_FILE.copy()
DATA_FILE_EXT_ADDITIONAL_LOCATIONS["ext_conf"] = "wire/ext_conf_additional_locations.xml"
DATA_FILE_EXT_DELETION = DATA_FILE.copy()
DATA_FILE_EXT_DELETION["manifest"] = "wire/manifest_deletion.xml"
DATA_FILE_EXT_SINGLE = DATA_FILE.copy()
DATA_FILE_EXT_SINGLE["manifest"] = "wire/manifest_deletion.xml"
DATA_FILE_MULTIPLE_EXT = DATA_FILE.copy()
DATA_FILE_MULTIPLE_EXT["ext_conf"] = "wire/ext_conf_multiple_extensions.xml"
DATA_FILE_CASE_MISMATCH_EXT = DATA_FILE.copy()
DATA_FILE_CASE_MISMATCH_EXT["ext_conf"] = "wire/ext_conf_settings_case_mismatch.xml"
DATA_FILE_NO_CERT_FORMAT = DATA_FILE.copy()
DATA_FILE_NO_CERT_FORMAT["certs"] = "wire/certs_no_format_specified.xml"
DATA_FILE_CERT_FORMAT_NOT_PFX = DATA_FILE.copy()
DATA_FILE_CERT_FORMAT_NOT_PFX["certs"] = "wire/certs_format_not_pfx.xml"
DATA_FILE_REMOTE_ACCESS = DATA_FILE.copy()
DATA_FILE_REMOTE_ACCESS["goal_state"] = "wire/goal_state_remote_access.xml"
DATA_FILE_REMOTE_ACCESS["remote_access"] = "wire/remote_access_single_account.xml"
DATA_FILE_PLUGIN_SETTINGS_MISMATCH = DATA_FILE.copy()
DATA_FILE_PLUGIN_SETTINGS_MISMATCH["ext_conf"] = "wire/invalid_config/ext_conf_plugin_settings_version_mismatch.xml"
DATA_FILE_REQUIRED_FEATURES = DATA_FILE.copy()
DATA_FILE_REQUIRED_FEATURES["ext_conf"] = "wire/ext_conf_required_features.xml"
DATA_FILE_VM_SETTINGS = DATA_FILE.copy()
DATA_FILE_VM_SETTINGS["vm_settings"] = "hostgaplugin/vm_settings.json"
DATA_FILE_VM_SETTINGS["ETag"] = "1"
DATA_FILE_VM_SETTINGS["ext_conf"] = "hostgaplugin/ext_conf.xml"
DATA_FILE_VM_SETTINGS["in_vm_artifacts_profile"] = "hostgaplugin/in_vm_artifacts_profile.json"
class WireProtocolData(object):
def __init__(self, data_files=None):
if data_files is None:
data_files = DATA_FILE
self.emulate_stale_goal_state = False
self.call_counts = {
"comp=versions": 0,
"/versions": 0,
"/health": 0,
"/HealthService": 0,
"/vmAgentLog": 0,
"goalstate": 0,
"hostingEnvironmentConfig": 0,
"sharedConfig": 0,
"certificates": 0,
"extensionsConfig": 0,
"remoteaccessinfouri": 0,
"extensionArtifact": 0,
"agentArtifact": 0,
"manifest.xml": 0,
"manifest_of_ga.xml": 0,
"ExampleHandlerLinux": 0,
"in_vm_artifacts_profile": 0,
"vm_settings": 0
}
self.status_blobs = []
self.data_files = data_files
self.version_info = None
self.goal_state = None
self.hosting_env = None
self.shared_config = None
self.certs = None
self.ext_conf = None
self.manifest = None
self.ga_manifest = None
self.trans_prv = None
self.trans_cert = None
self.ext = None
self.remote_access = None
self.in_vm_artifacts_profile = None
self.vm_settings = None
self.etag = None
self.imds_info = None
self.reload()
def reload(self):
self.version_info = load_data(self.data_files.get("version_info"))
self.goal_state = load_data(self.data_files.get("goal_state"))
self.hosting_env = load_data(self.data_files.get("hosting_env"))
self.shared_config = load_data(self.data_files.get("shared_config"))
self.certs = load_data(self.data_files.get("certs"))
self.ext_conf = self.data_files.get("ext_conf")
if self.ext_conf is not None:
self.ext_conf = load_data(self.ext_conf)
self.manifest = load_data(self.data_files.get("manifest"))
self.ga_manifest = load_data(self.data_files.get("ga_manifest"))
self.trans_prv = load_data(self.data_files.get("trans_prv"))
self.trans_cert = load_data(self.data_files.get("trans_cert"))
self.imds_info = json.loads(load_data(self.data_files.get("imds_info")))
self.ext = load_bin_data(self.data_files.get("test_ext"))
vm_settings = self.data_files.get("vm_settings")
if vm_settings is not None:
self.vm_settings = load_data(self.data_files.get("vm_settings"))
self.etag = self.data_files.get("ETag")
remote_access_data_file = self.data_files.get("remote_access")
if remote_access_data_file is not None:
self.remote_access = load_data(remote_access_data_file)
in_vm_artifacts_profile_file = self.data_files.get("in_vm_artifacts_profile")
if in_vm_artifacts_profile_file is not None:
self.in_vm_artifacts_profile = load_data(in_vm_artifacts_profile_file)
def reset_call_counts(self):
for counter in self.call_counts:
self.call_counts[counter] = 0
def mock_http_get(self, url, *_, **kwargs):
content = ''
response_headers = []
resp = MagicMock()
resp.status = httpclient.OK
if "comp=versions" in url: # wire server versions
content = self.version_info
self.call_counts["comp=versions"] += 1
elif "/versions" in url: # HostPlugin versions
content = '["2015-09-01"]'
self.call_counts["/versions"] += 1
elif url.endswith("/health"): # HostPlugin health
content = ''
self.call_counts["/health"] += 1
elif "goalstate" in url:
content = self.goal_state
self.call_counts["goalstate"] += 1
elif HttpRequestPredicates.is_hosting_environment_config_request(url):
content = self.hosting_env
self.call_counts["hostingEnvironmentConfig"] += 1
elif HttpRequestPredicates.is_shared_config_request(url):
content = self.shared_config
self.call_counts["sharedConfig"] += 1
elif HttpRequestPredicates.is_certificates_request(url):
content = self.certs
self.call_counts["certificates"] += 1
elif HttpRequestPredicates.is_extensions_config_request(url):
content = self.ext_conf
self.call_counts["extensionsConfig"] += 1
elif "remoteaccessinfouri" in url:
content = self.remote_access
self.call_counts["remoteaccessinfouri"] += 1
elif ".vmSettings" in url or ".settings" in url:
content = self.in_vm_artifacts_profile
self.call_counts["in_vm_artifacts_profile"] += 1
elif "/vmSettings" in url:
if self.vm_settings is None:
resp.status = httpclient.NOT_FOUND
else:
content = self.vm_settings
response_headers = [('ETag', self.etag)]
self.call_counts["vm_settings"] += 1
elif '{0}/metadata/compute'.format(IMDS_ENDPOINT) in url:
content = json.dumps(self.imds_info.get("compute", "{}"))
else:
# A stale GoalState results in a 400 from the HostPlugin
# for which the HTTP handler in restutil raises ResourceGoneError
if self.emulate_stale_goal_state:
if "extensionArtifact" in url:
self.emulate_stale_goal_state = False
self.call_counts["extensionArtifact"] += 1
raise ResourceGoneError()
else:
raise HttpError()
# For HostPlugin requests, replace the URL with that passed
# via the x-ms-artifact-location header
if "extensionArtifact" in url:
self.call_counts["extensionArtifact"] += 1
if "headers" not in kwargs:
raise ValueError("HostPlugin request is missing the HTTP headers: {0}", kwargs) # pylint: disable=raising-format-tuple
if "x-ms-artifact-location" not in kwargs["headers"]:
raise ValueError("HostPlugin request is missing the x-ms-artifact-location header: {0}", kwargs) # pylint: disable=raising-format-tuple
url = kwargs["headers"]["x-ms-artifact-location"]
if "manifest.xml" in url:
content = self.manifest
self.call_counts["manifest.xml"] += 1
elif HttpRequestPredicates.is_ga_manifest_request(url):
content = self.ga_manifest
self.call_counts["manifest_of_ga.xml"] += 1
elif "ExampleHandlerLinux" in url:
content = self.ext
self.call_counts["ExampleHandlerLinux"] += 1
resp.read = Mock(return_value=content)
return resp
elif ".vmSettings" in url or ".settings" in url:
content = self.in_vm_artifacts_profile
self.call_counts["in_vm_artifacts_profile"] += 1
else:
raise NotImplementedError(url)
resp.read = Mock(return_value=content.encode("utf-8"))
resp.getheaders = Mock(return_value=response_headers)
return resp
def mock_http_post(self, url, *_, **__):
content = None
resp = MagicMock()
resp.status = httpclient.OK
if url.endswith('/HealthService'):
self.call_counts['/HealthService'] += 1
content = ''
else:
raise NotImplementedError(url)
resp.read = Mock(return_value=content.encode("utf-8"))
return resp
def mock_http_put(self, url, data, **_):
content = ''
resp = MagicMock()
resp.status = httpclient.OK
if url.endswith('/vmAgentLog'):
self.call_counts['/vmAgentLog'] += 1
elif HttpRequestPredicates.is_storage_status_request(url):
self.status_blobs.append(data)
elif HttpRequestPredicates.is_host_plugin_status_request(url):
            # the status blob is carried in the body of the PUT request ("data"), not in "content"
            self.status_blobs.append(WireProtocolData.get_status_blob_from_hostgaplugin_put_status_request(data))
else:
raise NotImplementedError(url)
resp.read = Mock(return_value=content.encode("utf-8"))
return resp
def mock_crypt_util(self, *args, **kw):
# Partially patch instance method of class CryptUtil
cryptutil = CryptUtil(*args, **kw)
cryptutil.gen_transport_cert = Mock(side_effect=self.mock_gen_trans_cert)
return cryptutil
def mock_gen_trans_cert(self, trans_prv_file, trans_cert_file):
with open(trans_prv_file, 'w+') as prv_file:
prv_file.write(self.trans_prv)
with open(trans_cert_file, 'w+') as cert_file:
cert_file.write(self.trans_cert)
@staticmethod
def get_status_blob_from_hostgaplugin_put_status_request(data):
status_object = json.loads(data)
content = status_object["content"]
return base64.b64decode(content)
def get_no_of_plugins_in_extension_config(self):
if self.ext_conf is None:
return 0
ext_config_doc = parse_doc(self.ext_conf)
plugins_list = find(ext_config_doc, "Plugins")
return len(findall(plugins_list, "Plugin"))
def get_no_of_extensions_in_config(self):
if self.ext_conf is None:
return 0
ext_config_doc = parse_doc(self.ext_conf)
plugin_settings = find(ext_config_doc, "PluginSettings")
return len(findall(plugin_settings, "ExtensionRuntimeSettings")) + len(
findall(plugin_settings, "RuntimeSettings"))
#
    # Having trouble reading the regular expressions below? You are not alone!
#
# For the use of "(?<=" "(?=" see 7.2.1 in https://docs.python.org/3.1/library/re.html
# For the use of "\g<1>" see backreferences in https://docs.python.org/3.1/library/re.html#re.sub
#
# Note that these regular expressions are not enough to parse all valid XML documents (e.g. they do
# not account for metacharacters like < or > in the values) but they are good enough for the test
# data. There are some basic checks, but the functions may not match valid XML or produce invalid
# XML if their input is too complex.
#
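    # A rough sketch of what these helpers do, using hypothetical snippets rather
    # than the actual test data:
    #
    #   replace_xml_element_value("<Incarnation>1</Incarnation>", "Incarnation", "2")
    #       returns "<Incarnation>2</Incarnation>" (the lookbehind/lookahead leave the tags intact)
    #
    #   replace_xml_attribute_value('<Plugin name="A" version="1.0"/>', "Plugin", "version", "2.0")
    #       returns '<Plugin name="A" version="2.0"/>' ("\g<1>" re-emits everything captured before the value)
    #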
@staticmethod
def replace_xml_element_value(xml_document, element_name, element_value):
element_regex = r'(?<=<{0}>).+(?=</{0}>)'.format(element_name)
if not re.search(element_regex, xml_document):
raise Exception("Can't find XML element '{0}' in {1}".format(element_name, xml_document))
return re.sub(element_regex, element_value, xml_document)
@staticmethod
def replace_xml_attribute_value(xml_document, element_name, attribute_name, attribute_value):
attribute_regex = r'(?<=<{0} )(.*{1}=")[^"]+(?="[^>]*>)'.format(element_name, attribute_name)
if not re.search(attribute_regex, xml_document):
raise Exception("Can't find attribute {0} in XML element '{1}'. Document: {2}".format(attribute_name, element_name, xml_document))
return re.sub(attribute_regex, r'\g<1>{0}'.format(attribute_value), xml_document)
def set_etag(self, etag, timestamp=None):
"""
Sets the ETag for the mock response.
This function is used to mock a new goal state, and it also updates the timestamp (extensionsLastModifiedTickCount) in vmSettings.
"""
if timestamp is None:
timestamp = datetime.datetime.utcnow()
self.etag = etag
try:
vm_settings = json.loads(self.vm_settings)
vm_settings["extensionsLastModifiedTickCount"] = timeutil.datetime_to_ticks(timestamp)
self.vm_settings = json.dumps(vm_settings)
except ValueError: # some test data include syntax errors; ignore those
pass
def set_vm_settings_source(self, source):
"""
Sets the "extensionGoalStatesSource" for the mock vm_settings data
"""
vm_settings = json.loads(self.vm_settings)
vm_settings["extensionGoalStatesSource"] = source
self.vm_settings = json.dumps(vm_settings)
def set_incarnation(self, incarnation, timestamp=None):
"""
Sets the incarnation in the goal state, but not on its subcomponents (e.g. hosting env, shared config).
This function is used to mock a new goal state, and it also updates the timestamp (createdOnTicks) in ExtensionsConfig.
"""
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "Incarnation", str(incarnation))
if self.ext_conf is not None:
if timestamp is None:
timestamp = datetime.datetime.utcnow()
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "InVMGoalStateMetaData", "createdOnTicks", timeutil.datetime_to_ticks(timestamp))
def set_container_id(self, container_id):
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "ContainerId", container_id)
def set_role_config_name(self, role_config_name):
self.goal_state = WireProtocolData.replace_xml_element_value(self.goal_state, "ConfigName", role_config_name)
def set_hosting_env_deployment_name(self, deployment_name):
self.hosting_env = WireProtocolData.replace_xml_attribute_value(self.hosting_env, "Deployment", "name", deployment_name)
def set_shared_config_deployment_name(self, deployment_name):
self.shared_config = WireProtocolData.replace_xml_attribute_value(self.shared_config, "Deployment", "name", deployment_name)
def set_extensions_config_sequence_number(self, sequence_number):
'''
Sets the sequence number for *all* extensions
'''
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "RuntimeSettings", "seqNo", str(sequence_number))
def set_extensions_config_version(self, version):
'''
Sets the version for *all* extensions
'''
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "Plugin", "version", version)
def set_extensions_config_state(self, state):
'''
Sets the state for *all* extensions
'''
self.ext_conf = WireProtocolData.replace_xml_attribute_value(self.ext_conf, "Plugin", "state", state)
def set_manifest_version(self, version):
'''
Sets the version of the extension manifest
'''
self.manifest = WireProtocolData.replace_xml_element_value(self.manifest, "Version", version)
def set_extension_config(self, ext_conf_file):
self.ext_conf = load_data(ext_conf_file)
def set_extension_config_requested_version(self, version):
self.ext_conf = WireProtocolData.replace_xml_element_value(self.ext_conf, "Version", version)
|
{
"content_hash": "4fd2a357a391702f683f63c3967f8a6e",
"timestamp": "",
"source": "github",
"line_count": 444,
"max_line_length": 169,
"avg_line_length": 43.914414414414416,
"alnum_prop": 0.6397579238896297,
"repo_name": "Azure/WALinuxAgent",
"id": "7ec311af46d23b0038ff27f9b288c9c2c96fac8e",
"size": "20127",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/protocol/mockwiredata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3073264"
},
{
"name": "Shell",
"bytes": "19249"
}
],
"symlink_target": ""
}
|
from twisted.plugin import IPlugin
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.user import RemoteUser
from txircd.utils import ModeType, now, timestamp
from zope.interface import implements
from datetime import datetime
class ServerUID(ModuleData, Command):
implements(IPlugin, IModuleData, ICommand)
name = "ServerUID"
core = True
def actions(self):
return [ ("welcome", 500, self.broadcastUID) ]
def serverCommands(self):
return [ ("UID", 1, self) ]
def parseParams(self, server, params, prefix, tags):
if len(params) < 9:
return None
uuid, signonTS, nick, realHost, displayHost, hostType, ident, ip, nickTS = params[:9]
try:
connectTime = datetime.utcfromtimestamp(int(signonTS))
nickTime = datetime.utcfromtimestamp(int(nickTS))
except ValueError:
return None
currParam = 10
modes = {}
for mode in params[9]:
if mode == "+":
continue
try:
modeType = self.ircd.userModeTypes[mode]
except KeyError:
return None # There's a mode that's NOT REAL so get out of here
param = None
if modeType in (ModeType.List, ModeType.ParamOnUnset, ModeType.Param):
param = params[currParam]
currParam += 1
if not param or " " in param:
return None
if modeType == ModeType.List:
if mode not in modes:
modes[mode] = []
modes[mode].append(param)
else:
modes[mode] = param
gecos = params[currParam]
return {
"uuid": uuid,
"connecttime": connectTime,
"nick": nick,
"ident": ident,
"host": realHost,
"displayhost": displayHost,
"hosttype": hostType,
"ip": ip,
"gecos": gecos,
"nicktime": nickTime,
"modes": modes
}
def execute(self, server, data):
connectTime = data["connecttime"]
nickTime = data["nicktime"]
newUser = RemoteUser(self.ircd, data["ip"], data["uuid"], data["host"])
newUser.changeHost(data["hosttype"], data["displayhost"], True)
newUser.changeIdent(data["ident"], server)
newUser.changeGecos(data["gecos"], True)
newUser.connectedSince = connectTime
newUser.nickSince = nickTime
newUser.idleSince = now()
if data["nick"] in self.ircd.userNicks: # Handle nick collisions
otherUser = self.ircd.users[self.ircd.userNicks[data["nick"]]]
if otherUser.localOnly:
changeOK = self.ircd.runActionUntilValue("localnickcollision", otherUser, newUser, server, users=[otherUser, newUser])
if changeOK is None:
return None
sameUser = ("{}@{}".format(otherUser.ident, otherUser.ip) == "{}@{}".format(newUser.ident, newUser.ip))
if sameUser and newUser.nickSince < otherUser.nickSince: # If the user@ip is the same, the newer nickname should win
newUser.changeNick(newUser.uuid, server)
elif sameUser and otherUser.nickSince < newUser.nickSince:
otherUser.changeNick(otherUser.uuid, server)
elif newUser.nickSince < otherUser.nickSince: # Otherwise, the older nickname should win
otherUser.changeNick(otherUser.uuid, server)
elif otherUser.nickSince < newUser.nickSince:
newUser.changeNick(newUser.uuid, server)
else: # If the nickname times are the same, fall back on connection times, with the same hierarchy as before
if sameUser and newUser.connectedSince < otherUser.connectedSince:
newUser.changeNick(newUser.uuid, server)
elif sameUser and otherUser.connectedSince < newUser.connectedSince:
otherUser.changeNick(otherUser.uuid, server)
elif newUser.connectedSince < otherUser.connectedSince:
otherUser.changeNick(otherUser.uuid, server)
elif otherUser.connectedSince < newUser.connectedSince:
newUser.changeNick(newUser.uuid, server)
else: # As a final fallback, change both nicknames
otherUser.changeNick(otherUser.uuid, server)
newUser.changeNick(newUser.uuid, server)
if newUser.nick is None: # wasn't set by above logic
newUser.changeNick(data["nick"], server)
modeList = []
for mode, param in data["modes"].iteritems():
modeType = self.ircd.userModeTypes[mode]
if modeType == ModeType.List:
for paramData in param:
modeList.append((True, mode, paramData))
else:
modeList.append((True, mode, param))
newUser.setModes(modeList, server.serverID)
newUser.register("connection", True)
newUser.register("USER", True)
newUser.register("NICK", True)
connectTimestamp = str(timestamp(connectTime))
nickTimestamp = str(timestamp(nickTime))
modeString = newUser.modeString(None)
self.ircd.broadcastToServers(server, "UID", newUser.uuid, connectTimestamp, newUser.nick, newUser.realHost, newUser.host(), newUser.currentHostType(), newUser.ident, newUser.ip, nickTimestamp, modeString, newUser.gecos, prefix=self.ircd.serverID)
return True
def broadcastUID(self, user):
self.ircd.broadcastToServers(None, "UID", user.uuid, str(timestamp(user.connectedSince)), user.nick, user.realHost, user.host(), user.currentHostType(), user.ident, user.ip, str(timestamp(user.nickSince)), user.modeString(None), user.gecos, prefix=self.ircd.serverID)
serverUID = ServerUID()
|
{
"content_hash": "978266ce6cb2da398036831d421b86d9",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 269,
"avg_line_length": 40.144,
"alnum_prop": 0.7227979274611399,
"repo_name": "ElementalAlchemist/txircd",
"id": "3062f41c7a261e7738ed3ef9f5f02dad18853f0b",
"size": "5018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "txircd/modules/server/uid.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "492365"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Feed.errors_since_good'
db.add_column('feeds', 'errors_since_good',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Feed.errors_since_good'
db.delete_column('feeds', 'errors_since_good')
models = {
'rss_feeds.duplicatefeed': {
'Meta': {'object_name': 'DuplicateFeed'},
'duplicate_address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'duplicate_feed_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'duplicate_link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'branch_from_feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']", 'null': 'True', 'blank': 'True'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'errors_since_good': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'favicon_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'favicon_not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feed_address': ('django.db.models.fields.URLField', [], {'max_length': '255', 'db_index': 'True'}),
'feed_address_locked': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_link_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "'[Untitled]'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'hash_address_and_link': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_push': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'known_good': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'rss_feeds.feeddata': {
'Meta': {'object_name': 'FeedData'},
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'data'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'feed_classifier_counts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedloadtime': {
'Meta': {'object_name': 'FeedLoadtime'},
'date_accessed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loadtime': ('django.db.models.fields.FloatField', [], {})
}
}
complete_apps = ['rss_feeds']
|
{
"content_hash": "fb66fc27b34e88cd8348a681dd651db8",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 151,
"avg_line_length": 77.15116279069767,
"alnum_prop": 0.557648831951771,
"repo_name": "slava-sh/NewsBlur",
"id": "fd8e326dae5b829e35a11db6ac862f610f866040",
"size": "6659",
"binary": false,
"copies": "18",
"ref": "refs/heads/master",
"path": "apps/rss_feeds/migrations/0058_errors_since_good.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4431"
},
{
"name": "C++",
"bytes": "2926"
},
{
"name": "CSS",
"bytes": "674585"
},
{
"name": "CoffeeScript",
"bytes": "6451"
},
{
"name": "HTML",
"bytes": "265992"
},
{
"name": "Java",
"bytes": "696119"
},
{
"name": "JavaScript",
"bytes": "1561094"
},
{
"name": "M",
"bytes": "47696"
},
{
"name": "Nginx",
"bytes": "897"
},
{
"name": "Objective-C",
"bytes": "3716549"
},
{
"name": "Perl",
"bytes": "55598"
},
{
"name": "Python",
"bytes": "2374227"
},
{
"name": "R",
"bytes": "527"
},
{
"name": "Ruby",
"bytes": "870"
},
{
"name": "Shell",
"bytes": "40018"
}
],
"symlink_target": ""
}
|
""" Various kinds of layout components.
"""
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
from ..core import validation
from ..core.validation.warnings import EMPTY_LAYOUT, BOTH_CHILD_AND_ROOT
from ..core.properties import abstract, Bool, Int, Instance, List
from ..embed import notebook_div
from ..model import Model
@abstract
class LayoutDOM(Model):
""" An abstract base class for layout components. ``LayoutDOM`` is not
generally useful to instantiate on its own.
"""
width = Int(help="""
An optional width for the component (in pixels).
""")
height = Int(help="""
An optional height for the component (in pixels).
""")
disabled = Bool(False, help="""
Whether the widget will be disabled when rendered. If ``True``,
the widget will be greyed-out, and not respond to UI events.
""")
# TODO: (mp) Not yet, because it breaks plotting/notebook examples.
# Rename to _repr_html_ if we decide to enable this by default.
def __repr_html__(self):
return notebook_div(self)
@property
def html(self):
from IPython.core.display import HTML
return HTML(self.__repr_html__())
@abstract
class BaseBox(LayoutDOM):
""" Abstract base class for HBox and VBox. Do not use directly.
"""
def __init__(self, *args, **kwargs):
if len(args) > 0 and "children" in kwargs:
raise ValueError("'children' keyword cannot be used with positional arguments")
elif len(args) > 0:
kwargs["children"] = list(args)
super(BaseBox, self).__init__(**kwargs)
@validation.warning(EMPTY_LAYOUT)
def _check_empty_layout(self):
from itertools import chain
if not list(chain(self.children)):
return str(self)
@validation.warning(BOTH_CHILD_AND_ROOT)
def _check_child_is_also_root(self):
problems = []
for c in self.children:
if c.document is not None and c in c.document.roots:
problems.append(str(c))
if problems:
return ", ".join(problems)
else:
return None
children = List(Instance(LayoutDOM), help="""
The list of children, which can be other components including layouts, widgets and plots.
""")
class HBox(BaseBox):
""" Lay out child components in a single horizontal row.
Children can be specified as positional arguments, as a single argument
that is a sequence, or using the ``children`` keyword argument.
"""
class VBox(BaseBox):
""" Lay out child components in a single vertical row.
Children can be specified as positional arguments, as a single argument
that is a sequence, or using the ``children`` keyword argument.
"""
# parent class only, you need to set the fields you want
class VBoxForm(VBox):
"""
    Basically, a VBox where all components (generally form stuff)
    are wrapped in a <form> tag - important for bootstrap css
"""
|
{
"content_hash": "c6bedb1ab2caa4c4a7e8d549c654790b",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 97,
"avg_line_length": 29.58823529411765,
"alnum_prop": 0.6497680583167661,
"repo_name": "quasiben/bokeh",
"id": "0df6ef27aca61c2625f8deb2d08c55922b139e2d",
"size": "3018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/models/layouts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "448001"
},
{
"name": "CoffeeScript",
"bytes": "2130601"
},
{
"name": "JavaScript",
"bytes": "2530410"
},
{
"name": "Python",
"bytes": "1056239"
},
{
"name": "Scala",
"bytes": "28977"
},
{
"name": "Shell",
"bytes": "13082"
}
],
"symlink_target": ""
}
|
_GENERATED_ON = '2013-06-19'
_MYSQL_VERSION = (5, 7, 1)
# Start MySQL Error messages
CR_UNKNOWN_ERROR = u"Unknown MySQL error"
CR_SOCKET_CREATE_ERROR = u"Can't create UNIX socket (%s)"
CR_CONNECTION_ERROR = u"Can't connect to local MySQL server through socket '%-.100s' (%s)"
CR_CONN_HOST_ERROR = u"Can't connect to MySQL server on '%-.100s' (%s)"
CR_IPSOCK_ERROR = u"Can't create TCP/IP socket (%s)"
CR_UNKNOWN_HOST = u"Unknown MySQL server host '%-.100s' (%s)"
CR_SERVER_GONE_ERROR = u"MySQL server has gone away"
CR_VERSION_ERROR = u"Protocol mismatch; server version = %s, client version = %s"
CR_OUT_OF_MEMORY = u"MySQL client ran out of memory"
CR_WRONG_HOST_INFO = u"Wrong host info"
CR_LOCALHOST_CONNECTION = u"Localhost via UNIX socket"
CR_TCP_CONNECTION = u"%-.100s via TCP/IP"
CR_SERVER_HANDSHAKE_ERR = u"Error in server handshake"
CR_SERVER_LOST = u"Lost connection to MySQL server during query"
CR_COMMANDS_OUT_OF_SYNC = u"Commands out of sync; you can't run this command now"
CR_NAMEDPIPE_CONNECTION = u"Named pipe: %-.32s"
CR_NAMEDPIPEWAIT_ERROR = u"Can't wait for named pipe to host: %-.64s pipe: %-.32s (%s)"
CR_NAMEDPIPEOPEN_ERROR = u"Can't open named pipe to host: %-.64s pipe: %-.32s (%s)"
CR_NAMEDPIPESETSTATE_ERROR = u"Can't set state of named pipe to host: %-.64s pipe: %-.32s (%s)"
CR_CANT_READ_CHARSET = u"Can't initialize character set %-.32s (path: %-.100s)"
CR_NET_PACKET_TOO_LARGE = u"Got packet bigger than 'max_allowed_packet' bytes"
CR_EMBEDDED_CONNECTION = u"Embedded server"
CR_PROBE_SLAVE_STATUS = u"Error on SHOW SLAVE STATUS:"
CR_PROBE_SLAVE_HOSTS = u"Error on SHOW SLAVE HOSTS:"
CR_PROBE_SLAVE_CONNECT = u"Error connecting to slave:"
CR_PROBE_MASTER_CONNECT = u"Error connecting to master:"
CR_SSL_CONNECTION_ERROR = u"SSL connection error: %-.100s"
CR_MALFORMED_PACKET = u"Malformed packet"
CR_WRONG_LICENSE = u"This client library is licensed only for use with MySQL servers having '%s' license"
CR_NULL_POINTER = u"Invalid use of null pointer"
CR_NO_PREPARE_STMT = u"Statement not prepared"
CR_PARAMS_NOT_BOUND = u"No data supplied for parameters in prepared statement"
CR_DATA_TRUNCATED = u"Data truncated"
CR_NO_PARAMETERS_EXISTS = u"No parameters exist in the statement"
CR_INVALID_PARAMETER_NO = u"Invalid parameter number"
CR_INVALID_BUFFER_USE = u"Can't send long data for non-string/non-binary data types (parameter: %s)"
CR_UNSUPPORTED_PARAM_TYPE = u"Using unsupported buffer type: %s (parameter: %s)"
CR_SHARED_MEMORY_CONNECTION = u"Shared memory: %-.100s"
CR_SHARED_MEMORY_CONNECT_REQUEST_ERROR = u"Can't open shared memory; client could not create request event (%s)"
CR_SHARED_MEMORY_CONNECT_ANSWER_ERROR = u"Can't open shared memory; no answer event received from server (%s)"
CR_SHARED_MEMORY_CONNECT_FILE_MAP_ERROR = u"Can't open shared memory; server could not allocate file mapping (%s)"
CR_SHARED_MEMORY_CONNECT_MAP_ERROR = u"Can't open shared memory; server could not get pointer to file mapping (%s)"
CR_SHARED_MEMORY_FILE_MAP_ERROR = u"Can't open shared memory; client could not allocate file mapping (%s)"
CR_SHARED_MEMORY_MAP_ERROR = u"Can't open shared memory; client could not get pointer to file mapping (%s)"
CR_SHARED_MEMORY_EVENT_ERROR = u"Can't open shared memory; client could not create %s event (%s)"
CR_SHARED_MEMORY_CONNECT_ABANDONED_ERROR = u"Can't open shared memory; no answer from server (%s)"
CR_SHARED_MEMORY_CONNECT_SET_ERROR = u"Can't open shared memory; cannot send request event to server (%s)"
CR_CONN_UNKNOW_PROTOCOL = u"Wrong or unknown protocol"
CR_INVALID_CONN_HANDLE = u"Invalid connection handle"
CR_SECURE_AUTH = u"Connection using old (pre-4.1.1) authentication protocol refused (client option 'secure_auth' enabled)"
CR_FETCH_CANCELED = u"Row retrieval was canceled by mysql_stmt_close() call"
CR_NO_DATA = u"Attempt to read column without prior row fetch"
CR_NO_STMT_METADATA = u"Prepared statement contains no metadata"
CR_NO_RESULT_SET = u"Attempt to read a row while there is no result set associated with the statement"
CR_NOT_IMPLEMENTED = u"This feature is not implemented yet"
CR_SERVER_LOST_EXTENDED = u"Lost connection to MySQL server at '%s', system error: %s"
CR_STMT_CLOSED = u"Statement closed indirectly because of a preceding %s() call"
CR_NEW_STMT_METADATA = u"The number of columns in the result set differs from the number of bound buffers. You must reset the statement, rebind the result set columns, and execute the statement again"
CR_ALREADY_CONNECTED = u"This handle is already connected. Use a separate handle for each connection."
CR_AUTH_PLUGIN_CANNOT_LOAD = u"Authentication plugin '%s' cannot be loaded: %s"
CR_DUPLICATE_CONNECTION_ATTR = u"There is an attribute with the same name already"
CR_AUTH_PLUGIN_ERR = u"Authentication plugin '%s' reported error: %s"
# End MySQL Error messages
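# These messages are C printf-style templates; from Python they are typically
# filled in with the "%" operator. A hypothetical example (values made up):
#
#   CR_CONN_HOST_ERROR % ("db.example.com", 111)
#   -> u"Can't connect to MySQL server on 'db.example.com' (111)"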
|
{
"content_hash": "2a51837abebfcd24f61e5999885256b6",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 200,
"avg_line_length": 70.88235294117646,
"alnum_prop": 0.7479253112033195,
"repo_name": "avatarnewyork/daily_harvest",
"id": "b81dc5c4578c94488b4548b57ea3d8a5331f9dd8",
"size": "6005",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "packages/mysql/connector/locales/eng/client_error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5785"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from decimal import Decimal
import pytest
import solution as f
to_unicode = f._compat.to_unicode
def _clean(form, value, **kwargs):
return value
def test_validation_error():
v = f.ValidationError(u'olé')
assert v.message == u'olé'
v = f.ValidationError()
assert v.message
testdata = [
(f.Field, None, u'user', u'obj', u'user'),
(f.Field, None, None, u'obj', u'obj'),
(f.Field, u'default', None, None, u'default'),
(f.Field, u'default', u'user', None, u'user'),
(f.Field, u'default', None, u'obj', u'obj'),
(f.Field, u'default', u'user', u'obj', u'user'),
(f.Field, u'default', u'', u'obj', u''),
(f.Number, None, 10, 20, 10),
(f.Number, None, None, 20, 20),
(f.Number, 5, None, None, 5),
(f.Number, 5, 10, None, 10),
(f.Number, 5, None, 20, 20),
(f.Number, 5, 10, 20, 10),
(f.Number, 5, u'', 20, None),
    # Neither the default value nor the obj value is ever used
(f.Boolean, None, u'1', None, True),
(f.Boolean, True, u'1', None, True),
(f.Boolean, False, u'1', None, True),
(f.Boolean, None, u'1', u'0', True),
(f.Boolean, True, u'1', u'0', True),
(f.Boolean, False, u'1', u'0', True),
(f.Boolean, None, u'1', u'1', True),
(f.Boolean, True, u'1', u'1', True),
(f.Boolean, False, u'1', u'1', True),
(f.Boolean, None, u'0', None, False),
(f.Boolean, True, u'0', None, False),
(f.Boolean, False, u'0', None, False),
(f.Boolean, None, u'0', u'0', False),
(f.Boolean, True, u'0', u'0', False),
(f.Boolean, False, u'0', u'0', False),
(f.Boolean, None, u'0', u'1', False),
(f.Boolean, True, u'0', u'1', False),
(f.Boolean, False, u'0', u'1', False),
(f.Boolean, None, None, None, False),
(f.Boolean, True, None, None, False),
(f.Boolean, False, None, None, False),
(f.Boolean, None, None, u'0', False),
(f.Boolean, True, None, u'0', False),
(f.Boolean, False, None, u'0', False),
(f.Boolean, None, None, u'1', False),
(f.Boolean, True, None, u'1', False),
(f.Boolean, False, None, u'1', False),
]
@pytest.mark.parametrize('TheField,default,user_value,obj_value,expected', testdata)
def test_field(TheField, default, user_value, obj_value, expected):
field = TheField(default=default)
field.load_data(user_value, obj_value)
assert field.validate() == expected
def test_reset():
field = f.Field()
field.load_data(u'user', u'obj', u'file')
assert (field.str_value and field.obj_value and field.file_data and
not field.empty)
field.reset()
assert not (field.str_value or field.obj_value or field.file_data or
not field.empty)
def test_field_helpers():
field = f.Field()
assert (field.label_tag(u'Something', classes=u'span2') ==
u'<label class="span2">Something</label>')
assert field.error_tag(classes=u'alert alert-error') == u''
field.error = f.ValidationError(u'Error message')
assert (field.error_tag(classes=u'alert alert-error') ==
u'<div class="alert alert-error">Error message</div>')
def test_validate():
field = f.Field(default=u'default value')
field.validate()
assert field.validate() == u'default value'
assert not field.error
field = f.Field(validate=[f.Required])
assert field.validate() is None
assert str(field.error)
def test_validate_with_custom_msg():
field = f.Field(validate=[f.ValidEmail(u'invalid')])
field.load_data('email')
field.validate()
assert str(field.error) == 'invalid'
def test_clean_error_make_validation_fail():
def clean(data):
raise f.ValidationError('test')
field = f.Field(clean=clean)
field.name = u'abc'
field.load_data('foobar')
assert field.validate() is None
assert str(field.error) == 'test'
def test_render_text():
field = f.Text()
field.name = u'abc'
field.load_data(u'123')
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input foo="bar" name="abc" type="text" value="123">')
assert (field.as_textarea(foo='bar') ==
u'<textarea foo="bar" name="abc">123</textarea>')
assert (field(foo='bar', type='email') ==
u'<input foo="bar" name="abc" type="email" value="123">')
field.load_data(u'"Ben" & Jerry')
assert (field() == u'<input name="abc" type="text" value=\'"Ben" & Jerry\'>')
field = f.Text(hide_value=True)
field.name = u'abc'
field.load_data(u'123')
assert (field(foo='bar', type='password') ==
u'<input foo="bar" name="abc" type="password" value="">')
def test_render_text_extra():
field = f.Text(data_modal=True, aria_label='test', foo='niet', clean=_clean)
field.name = u'abc'
field.load_data(u'123')
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input aria-label="test" foo="bar" name="abc" type="text" value="123" data-modal>')
assert (field.as_textarea(foo='bar') ==
u'<textarea aria-label="test" foo="bar" name="abc" data-modal>123</textarea>')
assert (field(foo='bar', type='email') ==
u'<input aria-label="test" foo="bar" name="abc" type="email" value="123" data-modal>')
def test_validate_text():
field = f.Text(validate=[f.Required])
field.name = u'abc'
field.load_data(u'123')
assert field.validate() == u'123'
field = f.Text(hide_value=True, validate=[f.Required])
field.name = u'abc'
field.load_data(u'123')
assert field.validate() == u'123'
field = f.Text(validate=[f.Required])
field.name = u'abc'
field.load_data()
assert field.validate() is None
assert field.error
field.load_data(u'')
    assert field.validate() is None
assert field.error
field.load_data(u' ')
    assert field.validate() is None
assert field.error
def test_text_default():
value = u'abc'
field = f.Text(default=value)
assert field.validate() == value
def test_render_number():
field = f.Number()
field.name = u'abc'
field.load_data('123')
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input foo="bar" name="abc" type="number" value="123">')
assert (field.as_textarea(foo='bar') ==
u'<textarea foo="bar" name="abc">123</textarea>')
assert (field(foo='bar', type='score') ==
u'<input foo="bar" name="abc" type="score" value="123">')
field = f.Number(validate=[f.Required])
field.name = u'abc'
field.load_data('123')
assert (field() ==
u'<input name="abc" type="number" value="123" required>')
assert (field(required=False) ==
u'<input name="abc" type="number" value="123">')
def test_render_number_extra():
field = f.Number(data_modal=True, aria_label='test', foo='niet', clean=_clean)
field.name = u'abc'
field.load_data('123')
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input aria-label="test" foo="bar" name="abc" type="number" value="123" data-modal>')
assert (field.as_textarea(foo='bar') ==
u'<textarea aria-label="test" foo="bar" name="abc" data-modal>123</textarea>')
assert (field(foo='bar', type='score') ==
u'<input aria-label="test" foo="bar" name="abc" type="score" value="123" data-modal>')
def test_validate_number():
field = f.Number(validate=[f.Required])
field.name = u'abc'
field.load_data('123')
assert field.validate() == 123
field.load_data('defg')
assert field.validate() is None
assert field.error
def test_number_types():
field = f.Number(type=int)
field.load_data('3.02')
assert field.validate() == 3
field = f.Number(type=float)
field.load_data('3.02')
assert field.validate() == float('3.02')
field = f.Number(type=Decimal)
field.load_data('3.02')
assert field.validate() == Decimal('3.02')
def test_render_color():
field = f.Color()
field.name = u'abc'
field.load_data('#ffaf2e')
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input foo="bar" name="abc" type="color" value="#ffaf2e">')
assert (field(foo='bar', type='text') ==
u'<input foo="bar" name="abc" type="text" value="#ffaf2e">')
field = f.Color(validate=[f.Required])
field.name = u'abc'
field.load_data('#ffaf2e')
assert (field() ==
u'<input name="abc" type="color" value="#ffaf2e" required>')
assert (field(required=False) ==
u'<input name="abc" type="color" value="#ffaf2e">')
def test_render_color_extra():
field = f.Color(data_modal=True, aria_label='test', foo='niet', clean=_clean)
field.name = u'abc'
field.load_data('#ffaf2e')
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input aria-label="test" foo="bar" name="abc" type="color" value="#ffaf2e" data-modal>')
assert (field(foo='bar', type='text') ==
u'<input aria-label="test" foo="bar" name="abc" type="text" value="#ffaf2e" data-modal>')
def test_validate_color():
field = f.Color(validate=[f.Required])
field.name = u'abc'
field.load_data('#ffaf2e')
assert field.validate() == '#ffaf2e'
field.load_data('FFAF2E')
assert field.validate() == '#ffaf2e'
field.load_data('#fae')
assert field.validate() == '#ffaaee'
field.load_data('#faef')
assert field.validate() == '#ffaaeeff'
field.load_data('rgb(40, 104, 199)')
assert field.validate() == '#2868c7'
field.load_data('rgba(14,98,13,.5)')
assert field.validate() == '#0e620d80'
field.load_data()
assert not field.validate()
assert field.error
field.load_data('not a color')
assert not field.validate()
assert field.error
field.load_data('#ffaf2')
assert not field.validate()
assert field.error
field.load_data('rgb(300, 300, 300)')
assert not field.validate()
assert field.error
field.load_data('rgba(0, 0, 0, 2)')
assert not field.validate()
assert field.error
def test_color_default():
field = f.Color(default='#ffaf2e')
assert field.validate() == u'#ffaf2e'
def test_render_boolean():
field = f.Boolean()
field.name = u'abc'
field.load_data(obj_value=True)
assert field() == field.as_checkbox()
assert (field(foo='bar') ==
u'<input foo="bar" name="abc" type="checkbox" checked>')
field.load_data(obj_value=False)
assert field() == u'<input name="abc" type="checkbox">'
field.load_data(u'no')
assert field() == u'<input name="abc" type="checkbox">'
field = f.Boolean(validate=[f.Required])
field.name = u'abc'
field.load_data()
assert field() == u'<input name="abc" type="checkbox" required>'
assert field(required=False) == u'<input name="abc" type="checkbox">'
def test_render_boolean_extra():
field = f.Boolean(data_modal=True, aria_label='test', foo='niet', clean=_clean)
field.name = u'abc'
field.load_data(obj_value=True)
assert field() == field.as_checkbox()
assert (field(foo='bar') ==
u'<input aria-label="test" foo="bar" name="abc" type="checkbox" checked data-modal>')
def test_validate_boolean():
field = f.Boolean()
for val in [u'', u'0', u'no', u'off', u'false', u'NO', 'fAlsE']:
field.load_data(val)
value = field.validate()
assert value == False
for val in [u'1', u'ok', u'yes', u'Of course!!!1', u'whatever']:
field.load_data(val)
value = field.validate()
assert value == True
field = f.Boolean(validate=[f.Required])
value = field.validate()
assert value == False
def test_render_file():
field = f.File('.')
field.name = u'abc'
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input foo="bar" name="abc" type="file">')
field = f.File(validate=[f.Required])
field.name = u'abc'
assert (field() ==
u'<input name="abc" type="file" required>')
assert (field(required=False) ==
u'<input name="abc" type="file">')
def test_render_file_extra():
field = f.File(
'.', data_modal=True, aria_label='test', foo='niet', clean=_clean,
upload_to='aaaa', secret=False
)
field.name = u'abc'
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input aria-label="test" foo="bar" name="abc" type="file" data-modal>')
def test_validate_file():
field = f.File()
field.name = u'abc'
field.load_data(obj_value=u'obj value')
assert field.validate() == u'obj value'
assert not field.error
field.load_data(obj_value=u'obj value', file_data=u'file data')
assert field.validate() == u'file data'
assert not field.error
def test_field_as_dict():
message = u'Lorem ipsum'
field = f.Field(validate=[f.Required(message)])
field.name = u'abc'
assert field.validate() is None
expdict = {
'name': u'abc',
'value': u'',
'error': message,
}
result = sorted(list(field.as_dict().items()))
expected = sorted(list(expdict.items()))
assert result == expected
|
{
"content_hash": "23faaca1595fa85e3d62237ca672cc6e",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 102,
"avg_line_length": 29.651006711409394,
"alnum_prop": 0.594537498113777,
"repo_name": "jpscaletti/solution",
"id": "9652fd3f387376d9f4ad744d008c660909b5d408",
"size": "13271",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Python",
"bytes": "185430"
}
],
"symlink_target": ""
}
|
from mfr.core import FileHandler, get_file_extension
from .render import render_pdf
EXTENSIONS = ['.pdf']
class Handler(FileHandler):
"""FileHandler for Portable Document Format files."""
renderers = {
'html': render_pdf,
}
exporters = {}
def detect(self, fp):
return get_file_extension(fp.name) in EXTENSIONS
|
{
"content_hash": "4f67b75cdad064a850d41f37342db72d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 57,
"avg_line_length": 20.705882352941178,
"alnum_prop": 0.6590909090909091,
"repo_name": "mfraezz/modular-file-renderer",
"id": "3492bf7c556a2670ed363c146d121b670372e95f",
"size": "376",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "mfr/ext/pdf/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "138612"
},
{
"name": "HTML",
"bytes": "28153"
},
{
"name": "Java",
"bytes": "1783806"
},
{
"name": "JavaScript",
"bytes": "933002"
},
{
"name": "Python",
"bytes": "99144"
}
],
"symlink_target": ""
}
|
"""Tests for the notebook kernel and session manager."""
from subprocess import PIPE
import time
from unittest import TestCase
from traitlets.config.loader import Config
from ..localinterfaces import localhost
from jupyter_client import KernelManager
from jupyter_client.multikernelmanager import MultiKernelManager
from .utils import skip_win32
class TestKernelManager(TestCase):
def _get_tcp_km(self):
c = Config()
km = MultiKernelManager(config=c)
return km
def _get_ipc_km(self):
c = Config()
c.KernelManager.transport = 'ipc'
c.KernelManager.ip = 'test'
km = MultiKernelManager(config=c)
return km
def _run_lifecycle(self, km):
kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertTrue(km.is_alive(kid))
self.assertTrue(kid in km)
self.assertTrue(kid in km.list_kernel_ids())
self.assertEqual(len(km),1)
km.restart_kernel(kid, now=True)
self.assertTrue(km.is_alive(kid))
self.assertTrue(kid in km.list_kernel_ids())
km.interrupt_kernel(kid)
k = km.get_kernel(kid)
self.assertTrue(isinstance(k, KernelManager))
km.shutdown_kernel(kid, now=True)
self.assertTrue(not kid in km)
def _run_cinfo(self, km, transport, ip):
kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
k = km.get_kernel(kid)
cinfo = km.get_connection_info(kid)
self.assertEqual(transport, cinfo['transport'])
self.assertEqual(ip, cinfo['ip'])
self.assertTrue('stdin_port' in cinfo)
self.assertTrue('iopub_port' in cinfo)
stream = km.connect_iopub(kid)
stream.close()
self.assertTrue('shell_port' in cinfo)
stream = km.connect_shell(kid)
stream.close()
self.assertTrue('hb_port' in cinfo)
stream = km.connect_hb(kid)
stream.close()
km.shutdown_kernel(kid, now=True)
def test_tcp_lifecycle(self):
km = self._get_tcp_km()
self._run_lifecycle(km)
def test_shutdown_all(self):
km = self._get_tcp_km()
kid = km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
km.shutdown_all()
self.assertNotIn(kid, km)
# shutdown again is okay, because we have no kernels
km.shutdown_all()
def test_tcp_cinfo(self):
km = self._get_tcp_km()
self._run_cinfo(km, 'tcp', localhost())
@skip_win32
def test_ipc_lifecycle(self):
km = self._get_ipc_km()
self._run_lifecycle(km)
@skip_win32
def test_ipc_cinfo(self):
km = self._get_ipc_km()
self._run_cinfo(km, 'ipc', 'test')
|
{
"content_hash": "ac4ed1a43aa66216c1784865757b7e5a",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 64,
"avg_line_length": 31.811764705882354,
"alnum_prop": 0.6198224852071006,
"repo_name": "nitin-cherian/LifeLongLearning",
"id": "2ca2ea4519b14e18911dec7734d1294557e9e3f7",
"size": "2704",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/jupyter_client/tests/test_multikernelmanager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32365"
},
{
"name": "CSS",
"bytes": "10259"
},
{
"name": "HTML",
"bytes": "55977"
},
{
"name": "JavaScript",
"bytes": "7368910"
},
{
"name": "Jupyter Notebook",
"bytes": "768879"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "17502534"
},
{
"name": "Shell",
"bytes": "7751"
},
{
"name": "Smarty",
"bytes": "30663"
}
],
"symlink_target": ""
}
|
"""Unit tests for the spatial_weights.SpatiallyVaryingWeightsFromMask
plugin."""
import unittest
from datetime import datetime
import numpy as np
from iris.coords import AuxCoord
from iris.cube import CubeList
from iris.tests import IrisTest
from iris.util import squeeze
from improver.blending.spatial_weights import SpatiallyVaryingWeightsFromMask
from improver.metadata.probabilistic import find_threshold_coordinate
from improver.synthetic_data.set_up_test_cubes import set_up_probability_cube
from improver.utilities.warnings_handler import ManageWarnings
class Test__repr__(IrisTest):
"""Test the repr method."""
def test_basic(self):
"""Test that the __repr__ returns the expected string."""
result = str(SpatiallyVaryingWeightsFromMask("model_id"))
msg = "<SpatiallyVaryingWeightsFromMask: fuzzy_length: 10>"
self.assertEqual(result, msg)
class Test__create_template_slice(IrisTest):
"""Test create_template_slice method"""
def setUp(self):
"""
        Set up a basic input cube. Input cube has 2 thresholds and 3
        forecast_reference_times
"""
thresholds = [10, 20]
data = np.ones((2, 2, 3), dtype=np.float32)
cycle1 = set_up_probability_cube(
data,
thresholds,
spatial_grid="equalarea",
time=datetime(2017, 11, 10, 4, 0),
frt=datetime(2017, 11, 10, 0, 0),
)
cycle2 = set_up_probability_cube(
data,
thresholds,
spatial_grid="equalarea",
time=datetime(2017, 11, 10, 4, 0),
frt=datetime(2017, 11, 10, 1, 0),
)
cycle3 = set_up_probability_cube(
data,
thresholds,
spatial_grid="equalarea",
time=datetime(2017, 11, 10, 4, 0),
frt=datetime(2017, 11, 10, 2, 0),
)
self.cube_to_collapse = CubeList([cycle1, cycle2, cycle3]).merge_cube()
self.cube_to_collapse = squeeze(self.cube_to_collapse)
self.cube_to_collapse.rename("weights")
# This input array has 3 forecast reference times and 2 thresholds.
# The two thresholds have the same weights.
self.cube_to_collapse.data = np.array(
[
[[[1, 0, 1], [1, 1, 1]], [[1, 0, 1], [1, 1, 1]]],
[[[0, 0, 1], [0, 1, 1]], [[0, 0, 1], [0, 1, 1]]],
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]],
],
dtype=np.float32,
)
self.cube_to_collapse.data = np.ma.masked_equal(self.cube_to_collapse.data, 0)
self.plugin = SpatiallyVaryingWeightsFromMask("forecast_reference_time")
def test_multi_dim_blend_coord_fail(self):
"""Test error is raised when we have a multi-dimensional blend_coord"""
# Add a surface altitude coordinate which covers x and y dimensions.
altitudes = np.array([[10, 20, 30], [20, 30, 10]])
altitudes_coord = AuxCoord(
altitudes, standard_name="surface_altitude", units="m"
)
self.cube_to_collapse.add_aux_coord(altitudes_coord, data_dims=(2, 3))
message = "Blend coordinate must only be across one dimension"
plugin = SpatiallyVaryingWeightsFromMask("surface_altitude")
with self.assertRaisesRegex(ValueError, message):
plugin._create_template_slice(self.cube_to_collapse)
def test_varying_mask_fail(self):
"""Test error is raised when mask varies along collapsing dim"""
# Check fails when blending along threshold coordinate, as mask
# varies along this coordinate.
threshold_coord = find_threshold_coordinate(self.cube_to_collapse)
message = "The mask on the input cube can only vary along the blend_coord"
plugin = SpatiallyVaryingWeightsFromMask(threshold_coord.name())
with self.assertRaisesRegex(ValueError, message):
plugin._create_template_slice(self.cube_to_collapse)
def test_scalar_blend_coord_fail(self):
"""Test error is raised when blend_coord is scalar"""
message = "Blend coordinate .* has no associated dimension"
with self.assertRaisesRegex(ValueError, message):
self.plugin._create_template_slice(self.cube_to_collapse[0])
def test_basic(self):
"""Test a correct template slice is returned for simple case"""
expected = self.cube_to_collapse.copy()[:, 0, :, :]
result = self.plugin._create_template_slice(self.cube_to_collapse)
self.assertEqual(expected.metadata, result.metadata)
self.assertArrayAlmostEqual(expected.data, result.data)
def test_basic_no_change(self):
"""Test a correct template slice is returned for a case where
no slicing is needed"""
input_cube = self.cube_to_collapse.copy()[:, 0, :, :]
expected = input_cube.copy()
result = self.plugin._create_template_slice(input_cube)
self.assertEqual(expected.metadata, result.metadata)
self.assertArrayAlmostEqual(expected.data, result.data)
def test_aux_blending_coord(self):
"""Test a correct template slice is returned when blending_coord is
an AuxCoord"""
expected = self.cube_to_collapse.copy()[:, 0, :, :]
plugin = SpatiallyVaryingWeightsFromMask("forecast_period")
result = plugin._create_template_slice(self.cube_to_collapse)
self.assertEqual(expected.metadata, result.metadata)
self.assertArrayAlmostEqual(expected.data, result.data)
class Test_process(IrisTest):
"""Test process method"""
def setUp(self):
"""
Set up a basic cube and linear weights cube for the process
method. Input cube has 2 thresholds and 3 forecast_reference_times
"""
thresholds = [10, 20]
data = np.ones((2, 2, 3), dtype=np.float32)
cycle1 = set_up_probability_cube(
data,
thresholds,
spatial_grid="equalarea",
time=datetime(2017, 11, 10, 4, 0),
frt=datetime(2017, 11, 10, 0, 0),
)
cycle2 = set_up_probability_cube(
data,
thresholds,
spatial_grid="equalarea",
time=datetime(2017, 11, 10, 4, 0),
frt=datetime(2017, 11, 10, 1, 0),
)
cycle3 = set_up_probability_cube(
data,
thresholds,
spatial_grid="equalarea",
time=datetime(2017, 11, 10, 4, 0),
frt=datetime(2017, 11, 10, 2, 0),
)
self.cube_to_collapse = CubeList([cycle1, cycle2, cycle3]).merge_cube()
self.cube_to_collapse = squeeze(self.cube_to_collapse)
self.cube_to_collapse.rename("weights")
# This input array has 3 forecast reference times and 2 thresholds.
# The two thresholds have the same weights.
self.cube_to_collapse.data = np.array(
[
[[[1, 0, 1], [1, 1, 1]], [[1, 0, 1], [1, 1, 1]]],
[[[0, 0, 1], [0, 1, 1]], [[0, 0, 1], [0, 1, 1]]],
[[[1, 1, 1], [1, 1, 1]], [[1, 1, 1], [1, 1, 1]]],
],
dtype=np.float32,
)
self.cube_to_collapse.data = np.ma.masked_equal(self.cube_to_collapse.data, 0)
# Create a one_dimensional weights cube by slicing the larger
# weights cube.
# The resulting cube only has a forecast_reference_time coordinate.
self.one_dimensional_weights_cube = self.cube_to_collapse[:, 0, 0, 0]
self.one_dimensional_weights_cube.remove_coord("projection_x_coordinate")
self.one_dimensional_weights_cube.remove_coord("projection_y_coordinate")
self.one_dimensional_weights_cube.remove_coord(
find_threshold_coordinate(self.one_dimensional_weights_cube)
)
self.one_dimensional_weights_cube.data = np.array(
[0.2, 0.5, 0.3], dtype=np.float32
)
self.plugin = SpatiallyVaryingWeightsFromMask(
"forecast_reference_time", fuzzy_length=2
)
self.plugin_no_fuzzy = SpatiallyVaryingWeightsFromMask(
"forecast_reference_time", fuzzy_length=1
)
@ManageWarnings(record=True)
def test_none_masked(self, warning_list=None):
"""Test when we have no masked data in the input cube."""
self.cube_to_collapse.data = np.ones(self.cube_to_collapse.data.shape)
self.cube_to_collapse.data = np.ma.masked_equal(self.cube_to_collapse.data, 0)
expected_data = np.array(
[
[[0.2, 0.2, 0.2], [0.2, 0.2, 0.2]],
[[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
[[0.3, 0.3, 0.3], [0.3, 0.3, 0.3]],
],
dtype=np.float32,
)
message = "Expected masked input"
result = self.plugin.process(
self.cube_to_collapse, self.one_dimensional_weights_cube,
)
self.assertTrue(any(message in str(item) for item in warning_list))
self.assertArrayEqual(result.data, expected_data)
self.assertEqual(result.dtype, np.float32)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_all_masked(self):
"""Test when we have all masked data in the input cube."""
self.cube_to_collapse.data = np.ones(self.cube_to_collapse.data.shape)
self.cube_to_collapse.data = np.ma.masked_equal(self.cube_to_collapse.data, 1)
result = self.plugin.process(
self.cube_to_collapse, self.one_dimensional_weights_cube,
)
expected_data = np.zeros((3, 2, 3))
self.assertArrayAlmostEqual(expected_data, result.data)
        self.assertEqual(result.metadata, self.cube_to_collapse.metadata)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_no_fuzziness_no_one_dimensional_weights(self):
"""Test a simple case where we have no fuzziness in the spatial
weights and no adjustment from the one_dimensional weights."""
self.one_dimensional_weights_cube.data = np.ones((3))
expected_result = np.array(
[
[[0.5, 0.0, 0.333333], [0.5, 0.333333, 0.333333]],
[[0.0, 0.0, 0.333333], [0.0, 0.333333, 0.333333]],
[[0.5, 1.0, 0.333333], [0.5, 0.333333, 0.333333]],
],
dtype=np.float32,
)
result = self.plugin_no_fuzzy.process(
self.cube_to_collapse, self.one_dimensional_weights_cube,
)
self.assertArrayAlmostEqual(result.data, expected_result)
self.assertEqual(result.metadata, self.cube_to_collapse.metadata)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_no_fuzziness_no_one_dimensional_weights_transpose(self):
"""Test a simple case where we have no fuzziness in the spatial
weights and no adjustment from the one_dimensional weights and
transpose the input cube."""
self.one_dimensional_weights_cube.data = np.ones((3))
expected_result = np.array(
[
[[0.5, 0.0, 0.333333], [0.5, 0.333333, 0.333333]],
[[0.0, 0.0, 0.333333], [0.0, 0.333333, 0.333333]],
[[0.5, 1.0, 0.333333], [0.5, 0.333333, 0.333333]],
],
dtype=np.float32,
)
self.cube_to_collapse.transpose([2, 0, 1, 3])
result = self.plugin_no_fuzzy.process(
self.cube_to_collapse, self.one_dimensional_weights_cube,
)
self.assertArrayAlmostEqual(result.data, expected_result)
self.assertEqual(result.metadata, self.cube_to_collapse.metadata)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_no_fuzziness_with_one_dimensional_weights(self):
"""Test a simple case where we have no fuzziness in the spatial
weights and an adjustment from the one_dimensional weights."""
expected_result = np.array(
[
[[0.4, 0.0, 0.2], [0.4, 0.2, 0.2]],
[[0.0, 0.0, 0.5], [0.0, 0.5, 0.5]],
[[0.6, 1.0, 0.3], [0.6, 0.3, 0.3]],
],
dtype=np.float32,
)
result = self.plugin_no_fuzzy.process(
self.cube_to_collapse, self.one_dimensional_weights_cube,
)
self.assertArrayAlmostEqual(result.data, expected_result)
self.assertEqual(result.metadata, self.cube_to_collapse.metadata)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_fuzziness_no_one_dimensional_weights(self):
"""Test a simple case where we have some fuzziness in the spatial
weights and no adjustment from the one_dimensional weights."""
self.one_dimensional_weights_cube.data = np.ones((3))
expected_result = np.array(
[
[[0.25, 0.0, 0.166667], [0.353553, 0.166667, 0.235702]],
[[0.00, 0.0, 0.166667], [0.000000, 0.166667, 0.235702]],
[[0.75, 1.0, 0.666667], [0.646447, 0.666667, 0.528595]],
],
dtype=np.float32,
)
result = self.plugin.process(
self.cube_to_collapse, self.one_dimensional_weights_cube,
)
self.assertArrayAlmostEqual(result.data, expected_result)
self.assertEqual(result.metadata, self.cube_to_collapse.metadata)
@ManageWarnings(ignored_messages=["Collapsing a non-contiguous coordinate."])
def test_fuzziness_with_one_dimensional_weights(self):
"""Test a simple case where we have some fuzziness in the spatial
weights and with adjustment from the one_dimensional weights."""
expected_result = np.array(
[
[[0.2, 0.0, 0.10], [0.282843, 0.10, 0.141421]],
[[0.0, 0.0, 0.25], [0.000000, 0.25, 0.353553]],
[[0.8, 1.0, 0.65], [0.717157, 0.65, 0.505025]],
],
dtype=np.float32,
)
result = self.plugin.process(
self.cube_to_collapse, self.one_dimensional_weights_cube,
)
self.assertArrayAlmostEqual(result.data, expected_result)
self.assertEqual(result.metadata, self.cube_to_collapse.metadata)
def test_fuzziness_with_unequal_weightings(self):
"""Simulate the case of two models and a nowcast at short lead times: two
unmasked slices with low weights, and one masked slice with high weights"""
self.cube_to_collapse.data[0].mask = np.full_like(
self.cube_to_collapse.data[0], False
)
self.one_dimensional_weights_cube.data = np.array(
[0.025, 1.0, 0.075], dtype=np.float32
)
expected_data = np.array(
[
[[0.25, 0.25, 0.136364], [0.25, 0.136364, 0.0892939]],
[[0.0, 0.0, 0.45454544], [0.0, 0.454545, 0.642824]],
[[0.75, 0.75, 0.409091], [0.75, 0.409091, 0.267882]],
],
dtype=np.float32,
)
result = self.plugin.process(
self.cube_to_collapse, self.one_dimensional_weights_cube,
)
self.assertArrayAlmostEqual(result.data, expected_data)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "ffd5d761a95727104f30c1b1cb987083",
"timestamp": "",
"source": "github",
"line_count": 349,
"max_line_length": 86,
"avg_line_length": 43.81948424068768,
"alnum_prop": 0.5972013339436344,
"repo_name": "fionaRust/improver",
"id": "40ba7e32122057dac38fae2963160c544d01cfc8",
"size": "16950",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "improver_tests/blending/spatial_weights/test_SpatiallyVaryingWeightsFromMask.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5026255"
},
{
"name": "Shell",
"bytes": "9493"
}
],
"symlink_target": ""
}
|
'''
Remove unneeded imaging products when only creating the dirty images.
'''
import os
from tasks import rmtables
def remove_products(name):
'''
For the dirty image, we don't need to keep the:
* residual
* flux
* model
If the image does not exist (i.e., no PSF due to flagging), remove all.
'''
removals = [".flux", ".residual", ".model"]
# Check if the image exists
if not os.path.exists(name + ".image"):
rmtables(name + ".*")
else:
for remove in removals:
rmtables(name + remove)
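# Example (hypothetical image base name): remove_products("M33_field.dirty")
# removes M33_field.dirty.flux/.residual/.model, or every M33_field.dirty.*
# table if the .image itself was never produced.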
|
{
"content_hash": "f030f347066af454d465b66141114e14",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 20.142857142857142,
"alnum_prop": 0.601063829787234,
"repo_name": "e-koch/VLA_Lband",
"id": "2099739875048cb2c22b94d9ddcc958f8b2f2c2c",
"size": "565",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "imaging_pipeline/imaging_cleanup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2740022"
},
{
"name": "Shell",
"bytes": "98570"
}
],
"symlink_target": ""
}
|
"""
MIT License
Copyright (c) 2016 by Sinan Ugur Umu (SUU), University of Canterbury, Computational Biology
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
'''
Created on 27/10/2015
@author: suu13
'''
import argparse
import re
from Bio import SeqIO
import subprocess
import tempfile
import os
from joblib import Parallel,delayed
import time
__author__ = 'suu13'
def RNAup_Execute_Parallel(Input_sRNA_File,Input_Target_File,real_region):
sRNA_FASTA=list(SeqIO.parse(Input_sRNA_File,"fasta"))
Target_FASTA=list(SeqIO.parse(Input_Target_File,"fasta"))
if real_region is not None:
size=abs(real_region[1]-real_region[0])+5 # for bacteria +5 is enough
else:
size=25 #RNAup default
for sRNA in sRNA_FASTA:
sRNA_temp_file=tempfile.NamedTemporaryFile(prefix='RNAup_sRNA_',suffix='.tmp',mode="w+",delete=False)
sRNA_temp_file.write(">%s\n%s\n" %(str(sRNA.description),str(sRNA.seq)))
sRNA_temp_file.close()
RNAjobList=[]
while True:
#for RNA in Target_FASTA:
try:
if len(RNAjobList) < args.cpu:
RNA=Target_FASTA.pop(0)
RNA_temp_file=tempfile.NamedTemporaryFile(prefix='RNAup_RNA_',suffix='.tmp',mode="w+",delete=False)
RNA_temp_file.write(">%s\n%s\n" %(str(sRNA.description),str(sRNA.seq))) #write sRNA to the first line
RNA_temp_file.write(">%s\n%s\n" %(str(RNA.description),str(RNA.seq))) #write RNA to the second line
RNA_temp_file.close()
shell_command="""~/progs/ViennaRNA-2.1.9/Progs/RNAup -w %d -b -o --interaction_first < %s | gawk 'match($0,/ \((.*) =/,m) {printf m[1]}'""" %(size,RNA_temp_file.name)
job_id=subprocess.Popen(shell_command,shell=True,stdout=subprocess.PIPE)
RNAjobList.append([str(sRNA.description),str(RNA.description),job_id,RNA_temp_file.name])
else:
time.sleep(0.1) #wait for 0.1s to check status of jobs
for job in RNAjobList:
if subprocess.Popen.poll(job[2])!=None: #check the status of job object
os.remove(job[3])
print "%s\t%s\t%s" % (job[0],job[1],job[2].communicate()[0].strip())
RNAjobList.remove(job)
            except IndexError:
                # Target_FASTA is exhausted; drain the remaining jobs below
while len(RNAjobList) != 0:
time.sleep(0.1) #wait for 0.1s to check status of jobs
for job in RNAjobList:
if subprocess.Popen.poll(job[2])!=None: #check the status of job object
os.remove(job[3])
print "%s\t%s\t%s" % (job[0],job[1],job[2].communicate()[0].strip())
RNAjobList.remove(job)
break
os.remove(sRNA_temp_file.name)
return
def Pairfold_Execute(Input_sRNA_File,Input_Target_File):
sRNA_FASTA=list(SeqIO.parse(Input_sRNA_File,"fasta"))
Target_FASTA=list(SeqIO.parse(Input_Target_File,"fasta"))
for sRNA in sRNA_FASTA:
for RNA in Target_FASTA:
shell_command="""/home/suu13/misc_stuff/MultiRNAFold-2.0/pairfold "%s" "%s" -m RNA | grep MFE | awk '{print $NF}' """ % (str(sRNA.seq),str(RNA.seq)) #enter pairfold actual path
result=subprocess.check_output(shell_command,shell=True)
print "%s\t%s\t%s" % (str(sRNA.description),str(RNA.description),result.strip())
return
def Pairfold_Execute_Parallel(Input_sRNA_File,Input_Target_File):
sRNA_FASTA=list(SeqIO.parse(Input_sRNA_File,"fasta"))
Target_FASTA=list(SeqIO.parse(Input_Target_File,"fasta"))
for sRNA in sRNA_FASTA:
RNAjobList=[]
#for RNA in Target_FASTA:
while True:
try:
if len(RNAjobList) < args.cpu:
RNA=Target_FASTA.pop(0)
shell_command="""/home/suu13/misc_stuff/MultiRNAFold-2.0/pairfold "%s" "%s" -m RNA | grep MFE | awk '{print $NF}' """ % (str(sRNA.seq),str(RNA.seq)) #enter pairfold actual path
job_id=subprocess.Popen(shell_command,shell=True,stdout=subprocess.PIPE)
RNAjobList.append([str(sRNA.description),str(RNA.description),job_id])
else:
time.sleep(0.1) #wait for 0.1s to check status of jobs
for job in RNAjobList:
if subprocess.Popen.poll(job[2])!=None: #check the status of job object
print "%s\t%s\t%s" % (job[0],job[1],job[2].communicate()[0].strip())
RNAjobList.remove(job)
            except IndexError:
                # Target_FASTA is exhausted; drain the remaining jobs below
while len(RNAjobList) != 0:
time.sleep(0.1) #wait for 0.1s to check status of jobs
for job in RNAjobList:
if subprocess.Popen.poll(job[2])!=None: #check the status of job object
print "%s\t%s\t%s" % (job[0],job[1],job[2].communicate()[0].strip())
RNAjobList.remove(job)
break
return
def bifold_Execute(Input_sRNA_File,Input_Target_File):
sRNA_FASTA=list(SeqIO.parse(Input_sRNA_File,"fasta"))
Target_FASTA=list(SeqIO.parse(Input_Target_File,"fasta"))
for sRNA in sRNA_FASTA:
sRNA_temp_file=tempfile.NamedTemporaryFile(prefix='bifold_sRNA_',suffix='.tmp',mode="w+",delete=False)
sRNA_temp_file.write(">%s\n%s\n" %(str(sRNA.description),str(sRNA.seq)))
sRNA_temp_file.close()
for RNA in Target_FASTA:
RNA_temp_file=tempfile.NamedTemporaryFile(prefix='bifold_RNA_',suffix='.tmp',mode="w+",delete=False)
RNA_temp_file.write(">%s\n%s\n" %(str(RNA.description),str(RNA.seq)))
RNA_temp_file.close()
shell_command="""bifold -m 1 %s %s %s > /dev/null && head -n 1 %s | gawk '{match($0,/ENERGY = (.*) /,m); print m[1]}' """ %(sRNA_temp_file.name,RNA_temp_file.name,RNA_temp_file.name+".out",RNA_temp_file.name+".out")
result=subprocess.check_output(shell_command,shell=True)
print "%s\t%s\t%s" % (str(sRNA.description),str(RNA.description),result.strip())
os.remove(RNA_temp_file.name)
os.remove(sRNA_temp_file.name)
return
def bifold_Execute_Parallel(Input_sRNA_File,Input_Target_File):
sRNA_FASTA=list(SeqIO.parse(Input_sRNA_File,"fasta"))
Target_FASTA=list(SeqIO.parse(Input_Target_File,"fasta"))
for sRNA in sRNA_FASTA:
sRNA_temp_file=tempfile.NamedTemporaryFile(prefix='bifold_sRNA_',suffix='.tmp',mode="w+",delete=False)
sRNA_temp_file.write(">%s\n%s\n" %(str(sRNA.description),str(sRNA.seq)))
sRNA_temp_file.close()
RNAjobList=[]
while True:
#for RNA in Target_FASTA:
try:
if len(RNAjobList) < args.cpu:
RNA=Target_FASTA.pop(0)
RNA_temp_file=tempfile.NamedTemporaryFile(prefix='bifold_RNA_',suffix='.tmp',mode="w+",delete=False)
RNA_temp_file.write(">%s\n%s\n" %(str(RNA.description),str(RNA.seq)))
RNA_temp_file.close()
shell_command="""bifold -m 1 %s %s %s > /dev/null && head -n 1 %s | gawk '{match($0,/ENERGY = (.*) /,m); print m[1]}' """ %(sRNA_temp_file.name,RNA_temp_file.name,RNA_temp_file.name+".out",RNA_temp_file.name+".out")
#result=subprocess.check_output(shell_command,shell=True)
job_id=subprocess.Popen(shell_command,shell=True,stdout=subprocess.PIPE)
#print "%s\t%s\t%s" % (str(sRNA.description),str(RNA.description),result.strip())
RNAjobList.append([str(sRNA.description),str(RNA.description),job_id,RNA_temp_file.name])
#os.remove(RNA_temp_file.name)
else:
time.sleep(0.1) #wait for 0.1s to check status of jobs
for job in RNAjobList:
if subprocess.Popen.poll(job[2])!=None: #check the status of job object
os.remove(job[3])
print "%s\t%s\t%s" % (job[0],job[1],job[2].communicate()[0].strip())
RNAjobList.remove(job)
            except IndexError:
                # Target_FASTA is exhausted; drain the remaining jobs below
while len(RNAjobList) != 0:
time.sleep(0.1) #wait for 0.1s to check status of jobs
for job in RNAjobList:
if subprocess.Popen.poll(job[2])!=None: #check the status of job object
os.remove(job[3])
print "%s\t%s\t%s" % (job[0],job[1],job[2].communicate()[0].strip())
RNAjobList.remove(job)
break
os.remove(sRNA_temp_file.name)
return
def DuplexFold_Execute(Input_sRNA_File,Input_Target_File):
sRNA_FASTA=list(SeqIO.parse(Input_sRNA_File,"fasta"))
Target_FASTA=list(SeqIO.parse(Input_Target_File,"fasta"))
for sRNA in sRNA_FASTA:
sRNA_temp_file=tempfile.NamedTemporaryFile(prefix='DuplexFold_sRNA_',suffix='.tmp',mode="w+",delete=False)
sRNA_temp_file.write(">%s\n%s\n" %(str(sRNA.description),str(sRNA.seq)))
sRNA_temp_file.close()
for RNA in Target_FASTA:
RNA_temp_file=tempfile.NamedTemporaryFile(prefix='DuplexFold_RNA_',suffix='.tmp',mode="w+",delete=False)
RNA_temp_file.write(">%s\n%s\n" %(str(RNA.description),str(RNA.seq)))
RNA_temp_file.close()
shell_command="""DuplexFold -m 1 %s %s %s > /dev/null && head -n 1 %s | gawk '{match($0,/ENERGY = (.*) /,m); print m[1]}' """ %(sRNA_temp_file.name,RNA_temp_file.name,RNA_temp_file.name+".out",RNA_temp_file.name+".out")
result=subprocess.check_output(shell_command,shell=True)
print "%s\t%s\t%s" % (str(sRNA.description),str(RNA.description),result.strip())
os.remove(RNA_temp_file.name)
os.remove(sRNA_temp_file.name)
return
def AccessFold_Execute(Input_sRNA_File,Input_Target_File):
sRNA_FASTA=list(SeqIO.parse(Input_sRNA_File,"fasta"))
Target_FASTA=list(SeqIO.parse(Input_Target_File,"fasta"))
for sRNA in sRNA_FASTA:
sRNA_temp_file=tempfile.NamedTemporaryFile(prefix='AccessFold_sRNA_',suffix='.tmp',mode="w+",delete=False)
sRNA_temp_file.write(">%s\n%s\n" %(str(sRNA.description),str(sRNA.seq)))
sRNA_temp_file.close()
for RNA in Target_FASTA:
RNA_temp_file=tempfile.NamedTemporaryFile(prefix='AccessFold_RNA_',suffix='.tmp',mode="w+",delete=False)
RNA_temp_file.write(">%s\n%s\n" %(str(RNA.description),str(RNA.seq)))
RNA_temp_file.close()
shell_command="""AccessFold -m 1 %s %s %s > /dev/null && head -n 1 %s | gawk '{match($0,/ENERGY = (.*) /,m); print m[1]}' """ %(sRNA_temp_file.name,RNA_temp_file.name,RNA_temp_file.name+".out",RNA_temp_file.name+".out")
result=subprocess.check_output(shell_command,shell=True)
print "%s\t%s\t%s" % (str(sRNA.description),str(RNA.description),result.strip())
os.remove(RNA_temp_file.name)
os.remove(sRNA_temp_file.name)
return
def main():
#home-made switch
program_function={ 'pairfold': lambda: Pairfold_Execute(args.sRNA,args.targetRNA) if args.cpu is None else Pairfold_Execute_Parallel(args.sRNA,args.targetRNA),
'bifold': lambda: bifold_Execute(args.sRNA,args.targetRNA) if args.cpu is None else bifold_Execute_Parallel(args.sRNA,args.targetRNA),
'DuplexFold': lambda: DuplexFold_Execute(args.sRNA,args.targetRNA),
'RNAup': lambda: RNAup_Execute_Parallel(args.sRNA,args.targetRNA,args.window),
'AccessFold': lambda: AccessFold_Execute(args.sRNA,args.targetRNA),
}
program_function[args.program]()
if __name__ == '__main__':
Argument_Parser=argparse.ArgumentParser(prog="RNA_prediction_wrapper.py")
Argument_Parser.add_argument('-program',type=str,help="Program to run",choices=['pairfold','bifold','DuplexFold','RNAup','AccessFold'],required=True)
Argument_Parser.add_argument('-sRNA',type=str,help="Small RNA file/miRNA etc.",required=True)
Argument_Parser.add_argument('-targetRNA',type=str,help="Target RNA file/mRNAs etc.",required=True)
Argument_Parser.add_argument('-cpu',type=int,help="Use parallel computing")
Argument_Parser.add_argument('-window',type=int,nargs=2,help="Target interaction region, from start to stop",required=False)
args=Argument_Parser.parse_args()
main()
|
{
"content_hash": "84d001c41dfc7168c9641221bc432494",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 235,
"avg_line_length": 47.739583333333336,
"alnum_prop": 0.5993890464761074,
"repo_name": "UCanCompBio/RNA_Interactions_Benchmark",
"id": "77ad8e750e62d1432ce1d3691b45d06c33074af5",
"size": "13775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/significance/RNA_prediction_wrapper.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45985"
},
{
"name": "R",
"bytes": "32268"
},
{
"name": "Shell",
"bytes": "9626"
}
],
"symlink_target": ""
}
|
"""
The Connection class negotiates and manages the connection state.
"""
import logging
# pylint: disable=import-error
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
try:
import ssl
except ImportError:
ssl = None
import threading
import time
from pamqp import specification as spec
from rabbitpy import base
from rabbitpy import heartbeat
from rabbitpy import io
from rabbitpy import channel
from rabbitpy import channel0
from rabbitpy import events
from rabbitpy import exceptions
from rabbitpy import message
from rabbitpy.utils import queue
from rabbitpy import utils
LOGGER = logging.getLogger(__name__)
AMQP = 'amqp'
AMQPS = 'amqps'
if ssl:
SSL_CERT_MAP = {'ignore': ssl.CERT_NONE,
'optional': ssl.CERT_OPTIONAL,
'required': ssl.CERT_REQUIRED}
SSL_VERSION_MAP = dict()
if hasattr(ssl, 'PROTOCOL_SSLv2'):
SSL_VERSION_MAP['SSLv2'] = getattr(ssl, 'PROTOCOL_SSLv2')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
SSL_VERSION_MAP['SSLv3'] = getattr(ssl, 'PROTOCOL_SSLv3')
if hasattr(ssl, 'PROTOCOL_SSLv23'):
SSL_VERSION_MAP['SSLv23'] = getattr(ssl, 'PROTOCOL_SSLv23')
if hasattr(ssl, 'PROTOCOL_TLSv1'):
SSL_VERSION_MAP['TLSv1'] = getattr(ssl, 'PROTOCOL_TLSv1')
else:
SSL_CERT_MAP, SSL_VERSION_MAP = dict(), dict()
# pylint: disable=too-many-instance-attributes
class Connection(base.StatefulObject):
"""The Connection object is responsible for negotiating a connection and
managing its state. When creating a new instance of the Connection object,
if no URL is passed in, it uses the default connection parameters of
localhost port 5672, virtual host / with the guest/guest username/password
    combination. Represented as an AMQP URL, the connection information is:
:code:`amqp://guest:guest@localhost:5672/%2F`
    To use a different connection, pass in an AMQP URL that follows the standard
format:
:code:`[scheme]://[username]:[password]@[host]:[port]/[virtual_host]`
The following example connects to the test virtual host on a RabbitMQ
server running at 192.168.1.200 port 5672 as the user "www" and the
password rabbitmq:
    :code:`amqp://www:rabbitmq@192.168.1.200:5672/test`
.. note::
        Be aware that connection exceptions may be raised while using any
        functionality in the library.
:param str url: The AMQP connection URL
:raises: rabbitpy.exceptions.AMQPException
:raises: rabbitpy.exceptions.ConnectionException
:raises: rabbitpy.exceptions.ConnectionResetException
:raises: rabbitpy.exceptions.RemoteClosedException
"""
CANCEL_METHOD = ['Basic.Cancel']
DEFAULT_CHANNEL_MAX = 65535
DEFAULT_TIMEOUT = 3
DEFAULT_HEARTBEAT_INTERVAL = 60.0
DEFAULT_LOCALE = 'en_US'
DEFAULT_URL = 'amqp://guest:guest@localhost:5672/%2F'
DEFAULT_VHOST = '%2F'
GUEST = 'guest'
PORTS = {'amqp': 5672, 'amqps': 5671, 'api': 15672}
QUEUE_WAIT = 0.01
def __init__(self, url=None):
"""Create a new instance of the Connection object"""
super(Connection, self).__init__()
# Create a name for the connection
self._name = '0x%x' % id(self)
# Extract parts of connection URL for use later
self._args = self._process_url(url or self.DEFAULT_URL)
# General events and queues shared across threads
self._events = events.Events()
# A queue for the child threads to put exceptions in
self._exceptions = queue.Queue()
# One queue for writing frames, regardless of the channel sending them
self._write_queue = queue.Queue()
# Lock used when managing the channel stack
self._channel_lock = threading.Lock()
# Attributes for core object threads
self._channel0 = None
self._channels = dict()
self._heartbeat = None
self._io = None
# Used by Message for breaking up body frames
self._max_frame_size = None
# Connect to RabbitMQ
self._connect()
def __enter__(self):
"""For use as a context manager, return a handle to this object
instance.
:rtype: Connection
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""When leaving the context, examine why the context is leaving, if
it's an exception or what.
"""
if exc_type and exc_val:
self._set_state(self.CLOSED)
raise exc_val
self.close()
@property
def args(self):
"""Return the connection arguments.
:rtype: dict
"""
return dict(self._args)
@property
def blocked(self):
"""Indicates if the connection is blocked from publishing by RabbitMQ.
This flag indicates communication from RabbitMQ that the connection is
blocked using the Connection.Blocked RPC notification from RabbitMQ
that was added in RabbitMQ 3.2.
:rtype: bool
"""
return self._events.is_set(events.CONNECTION_BLOCKED)
def channel(self, blocking_read=False):
"""Create a new channel
If blocking_read is True, the cross-thread Queue.get use will use
blocking operations that lower resource utilization and increase
throughput. However, due to how Python's blocking Queue.get is
implemented, KeyboardInterrupt is not raised when CTRL-C is
pressed.
:param bool blocking_read: Enable for higher throughput
:raises: rabbitpy.exceptions.AMQPException
:raises: rabbitpy.exceptions.RemoteClosedChannelException
"""
with self._channel_lock:
channel_id = self._get_next_channel_id()
channel_frames = queue.Queue()
self._channels[channel_id] = \
channel.Channel(channel_id,
self.capabilities,
self._events,
self._exceptions,
channel_frames,
self._write_queue,
self._max_frame_size,
self._io.write_trigger,
self,
blocking_read)
self._add_channel_to_io(self._channels[channel_id], channel_frames)
self._channels[channel_id].open()
return self._channels[channel_id]
def close(self):
"""Close the connection, including all open channels.
:raises: rabbitpy.exceptions.ConnectionClosed
"""
if not self.open and not self.opening:
raise exceptions.ConnectionClosed()
if not self._events.is_set(events.SOCKET_CLOSED):
self._set_state(self.CLOSING)
self._shutdown_connection()
LOGGER.debug('Setting to closed')
self._set_state(self.CLOSED)
@property
def capabilities(self):
"""Return the RabbitMQ Server capabilities from the connection
negotiation process.
:rtype: dict
"""
return self._channel0.properties.get('capabilities', dict())
@property
def server_properties(self):
"""Return the RabbitMQ Server properties from the connection
negotiation process.
:rtype: dict
"""
return self._channel0.properties
def _add_channel_to_io(self, channel_id, channel_queue):
"""Add a channel and queue to the IO object.
        :param rabbitpy.base.AMQPChannel channel_id: The channel to add
        :param Queue.Queue channel_queue: Channel inbound msg queue
"""
LOGGER.debug('Adding channel %s to io', int(channel_id))
self._io.add_channel(channel_id, channel_queue)
@property
def _api_credentials(self):
"""Return the auth credentials as a tuple
        :rtype: tuple
"""
return self._args['username'], self._args['password']
@property
def _channel0_closed(self):
"""Returns a boolean indicating if the base connection channel (0)
is closed.
:rtype: bool
"""
return self._channel0.open and not \
self._events.is_set(events.CHANNEL0_CLOSED)
def _close_all_channels(self):
"""Close all open channels"""
for chan_id in [chan_id for chan_id in self._channels
if not self._channels[chan_id].closed]:
self._channels[chan_id].close()
self._channel0.close()
def _close_channels(self):
"""Close all the channels that are currently open."""
for channel_id in self._channels:
if (self._channels[channel_id].open and
not self._channels[channel_id].closing):
self._channels[channel_id].close()
def _connect(self):
"""Connect to the RabbitMQ Server"""
self._set_state(self.OPENING)
# Create and start the IO object that reads, writes & dispatches frames
self._io = self._create_io_thread()
self._io.daemon = True
self._io.start()
# Wait for IO to connect to the socket or raise an exception
while self.opening and not self._events.is_set(events.SOCKET_OPENED):
if not self._exceptions.empty():
exception = self._exceptions.get()
raise exception
            # :meth:`threading.Event.wait` always returns None in Python 2.6,
            # so it is impossible to simply check the wait() result
self._events.wait(events.SOCKET_OPENED, self._args['timeout'])
if not self._events.is_set(events.SOCKET_OPENED):
raise RuntimeError("Timeout waiting for opening the socket")
# If the socket could not be opened, return instead of waiting
if self.closed:
return self.close()
# Create the Channel0 queue and add it to the IO thread
self._channel0 = self._create_channel0()
self._add_channel_to_io(self._channel0, None)
self._channel0.start()
# Wait for Channel0 to raise an exception or negotiate the connection
while not self._channel0.open:
if not self._exceptions.empty():
exception = self._exceptions.get()
self._io.stop()
raise exception
time.sleep(0.01)
# Set the maximum frame size for channel use
self._max_frame_size = self._channel0.maximum_frame_size
# Create the heartbeat checker
self._heartbeat = heartbeat.Heartbeat(self._io, self._channel0,
self._args['heartbeat'])
self._heartbeat.start()
self._set_state(self.OPEN)
def _create_channel0(self):
"""Each connection should have a distinct channel0
:rtype: rabbitpy.channel0.Channel0
"""
return channel0.Channel0(connection_args=self._args,
events_obj=self._events,
exception_queue=self._exceptions,
write_queue=self._write_queue,
write_trigger=self._io.write_trigger,
connection=self)
def _create_io_thread(self):
"""Create the IO thread and the objects it uses for communication.
:rtype: rabbitpy.io.IO
"""
return io.IO(name='%s-io' % self._name,
kwargs={'events': self._events,
'exceptions': self._exceptions,
'connection_args': self._args,
'write_queue': self._write_queue})
def _create_message(self, channel_id, method_frame, header_frame, body):
"""Create a message instance with the channel it was received on and
the dictionary of message parts.
:param int channel_id: The channel id the message was sent on
:param method_frame: The method frame value
:type method_frame: pamqp.specification.Frame
:param header_frame: The header frame value
:type header_frame: pamqp.header.ContentHeader
:param str body: The message body
:rtype: rabbitpy.message.Message
"""
msg = message.Message(self._channels[channel_id],
body,
header_frame.properties.to_dict())
msg.method = method_frame
msg.name = method_frame.name
return msg
def _get_next_channel_id(self):
"""Return the next channel id
:rtype: int
"""
if not self._channels:
return 1
if self._max_channel_id == self._channel0.maximum_channels:
raise exceptions.TooManyChannelsError
return self._max_channel_id + 1
@property
def _max_channel_id(self):
"""Return the maximum channel ID that is currently being used.
:rtype: int
"""
return max(list(self._channels.keys()))
@staticmethod
def _normalize_expectations(channel_id, expectations):
"""Turn a class or list of classes into a list of class names.
:param int channel_id: The channel to normalize for
:param expectations: List of classes or class name or class obj
:type expectations: list or str or pamqp.specification.Frame
:rtype: list
"""
if isinstance(expectations, list):
output = list()
for value in expectations:
if isinstance(value, str):
output.append('%i:%s' % (channel_id, value))
else:
output.append('%i:%s' % (channel_id, value.name))
return output
elif utils.is_string(expectations):
return ['%i:%s' % (channel_id, expectations)]
return ['%i:%s' % (channel_id, expectations.name)]
def _process_url(self, url):
"""Parse the AMQP URL passed in and return the configuration
information in a dictionary of values.
The URL format is as follows:
amqp[s]://username:password@host:port/virtual_host[?query string]
Values in the URL such as the virtual_host should be URL encoded or
quoted just as a URL would be in a web browser. The default virtual
host / in RabbitMQ should be passed as %2F.
Default values:
        - If port is omitted, port 5672 is used for AMQP and port 5671 is
used for AMQPS
- If username or password is omitted, the default value is guest
- If the virtual host is omitted, the default value of %2F is used
Query string options:
- heartbeat
- channel_max
- frame_max
- locale
- cacertfile - Path to CA certificate file
- certfile - Path to client certificate file
- keyfile - Path to client certificate key
- verify - Server certificate validation requirements (1)
- ssl_version - SSL version to use (2)
(1) Should be one of three values:
- ignore - Ignore the cert if provided (default)
- optional - Cert is validated if provided
- required - Cert is required and validated
(2) Should be one of four values:
- SSLv2
- SSLv3
- SSLv23
- TLSv1
:param str url: The AMQP url passed in
:rtype: dict
:raises: ValueError
"""
parsed = utils.urlparse(url)
self._validate_uri_scheme(parsed.scheme)
# Toggle the SSL flag based upon the URL scheme and if SSL is enabled
use_ssl = True if parsed.scheme == 'amqps' and ssl else False
# Ensure that SSL is available if SSL is requested
if parsed.scheme == 'amqps' and not ssl:
LOGGER.warning('SSL requested but not available, disabling')
# Figure out the port as specified by the scheme
scheme_port = self.PORTS[AMQPS] if parsed.scheme == AMQPS \
else self.PORTS[AMQP]
# Set the vhost to be after the base slash if it was specified
vhost = self.DEFAULT_VHOST
if parsed.path:
vhost = parsed.path[1:] or self.DEFAULT_VHOST
# Parse the query string
qargs = utils.parse_qs(parsed.query)
# Return the configuration dictionary to use when connecting
return {
'host': parsed.hostname,
'port': parsed.port or scheme_port,
'virtual_host': utils.unquote(vhost),
'username': urlparse.unquote(parsed.username or self.GUEST),
'password': urlparse.unquote(parsed.password or self.GUEST),
'timeout': self._qargs_int('timeout', qargs, self.DEFAULT_TIMEOUT),
'heartbeat': self._qargs_int('heartbeat', qargs,
self.DEFAULT_HEARTBEAT_INTERVAL),
'frame_max': self._qargs_int('frame_max', qargs,
spec.FRAME_MAX_SIZE),
'channel_max': self._qargs_int('channel_max', qargs,
self.DEFAULT_CHANNEL_MAX),
'locale': self._qargs_value('locale', qargs),
'ssl': use_ssl,
'cacertfile': self._qargs_mk_value(['cacertfile', 'ssl_cacert'],
qargs),
'certfile': self._qargs_mk_value(['certfile', 'ssl_cert'], qargs),
'keyfile': self._qargs_mk_value(['keyfile', 'ssl_key'], qargs),
'verify': self._qargs_ssl_validation(qargs),
'ssl_version': self._qargs_ssl_version(qargs)}
@staticmethod
def _qargs_int(key, values, default):
"""Return the query arg value as an integer for the specified key or
return the specified default value.
:param str key: The key to return the value for
:param dict values: The query value dict returned by urlparse
:param int default: The default return value
:rtype: int
"""
return int(values.get(key, [default])[0])
@staticmethod
def _qargs_float(key, values, default):
"""Return the query arg value as a float for the specified key or
return the specified default value.
:param str key: The key to return the value for
:param dict values: The query value dict returned by urlparse
:param float default: The default return value
:rtype: float
"""
return float(values.get(key, [default])[0])
def _qargs_ssl_validation(self, values):
"""Return the value mapped from the string value in the query string
for the AMQP URL specifying which level of server certificate
validation is required, if any.
:param dict values: The dict of query values from the AMQP URI
:rtype: int
"""
validation = self._qargs_mk_value(['verify', 'ssl_validation'], values)
if not validation:
return
elif validation not in SSL_CERT_MAP:
raise ValueError(
                'Unsupported server cert validation option: %s' %
                validation)
return SSL_CERT_MAP[validation]
def _qargs_ssl_version(self, values):
"""Return the value mapped from the string value in the query string
for the AMQP URL for SSL version.
:param dict values: The dict of query values from the AMQP URI
:rtype: int
"""
version = self._qargs_value('ssl_version', values)
if not version:
return
elif version not in SSL_VERSION_MAP:
            raise ValueError('Unsupported SSL version: %s' % version)
return SSL_VERSION_MAP[version]
@staticmethod
def _qargs_value(key, values, default=None):
"""Return the value from the query arguments for the specified key
or the default value.
:param str key: The key to get the value for
:param dict values: The query value dict returned by urlparse
:return: mixed
"""
return values.get(key, [default])[0]
def _qargs_mk_value(self, keys, values):
"""Try and find the query string value where the value can be specified
with different keys.
:param lists keys: The keys to check
:param dict values: The query value dict returned by urlparse
:return: mixed
"""
for key in keys:
value = self._qargs_value(key, values)
if value is not None:
return value
return None
def _shutdown_connection(self):
"""Tell Channel0 and IO to stop if they are not stopped.
"""
# Make sure the heartbeat is not running
if self._heartbeat is not None:
self._heartbeat.stop()
if not self._events.is_set(events.SOCKET_CLOSED):
self._close_all_channels()
# Let the IOLoop know to close
self._events.set(events.SOCKET_CLOSE)
# Break out of select waiting
self._trigger_write()
if (self._events.is_set(events.SOCKET_OPENED) and
not self._events.is_set(events.SOCKET_CLOSED)):
LOGGER.debug('Waiting on socket to close')
self._events.wait(events.SOCKET_CLOSED, 0.1)
self._io.stop()
else:
return self._io.stop()
while self._io.is_alive():
time.sleep(0.1)
def _trigger_write(self):
"""Notifies the IO loop we need to write a frame by writing a byte
to a local socket.
"""
utils.trigger_write(self._io.write_trigger)
def _validate_uri_scheme(self, scheme):
"""Insure that the specified URI scheme is supported by rabbitpy
:param str scheme: The value to validate
:raises: ValueError
"""
if scheme not in list(self.PORTS.keys()):
raise ValueError('Unsupported URI scheme: %s' % scheme)
|
{
"content_hash": "195f0dc377cb0fe84215ed04e51fd305",
"timestamp": "",
"source": "github",
"line_count": 642,
"max_line_length": 79,
"avg_line_length": 34.598130841121495,
"alnum_prop": 0.5934629929767693,
"repo_name": "gmr/rabbitpy",
"id": "c4f399de6908cd735cb5112dac4518816dff8529",
"size": "22212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rabbitpy/connection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "250614"
},
{
"name": "Shell",
"bytes": "1283"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from sentry.models import Environment, GroupRelease, Release
from sentry.testutils import TestCase
class GetOrCreateTest(TestCase):
def test_simple(self):
project = self.create_project()
group = self.create_group(project=project)
release = Release.objects.create(version='abc',
project=project,
organization_id=project.organization_id)
release.add_project(project)
env = Environment.objects.create(project_id=project.id, name='prod')
datetime = timezone.now()
grouprelease = GroupRelease.get_or_create(
group=group,
release=release,
environment=env,
datetime=datetime,
)
assert grouprelease.project_id == project.id
assert grouprelease.group_id == group.id
assert grouprelease.release_id == release.id
assert grouprelease.environment == 'prod'
assert grouprelease.first_seen == datetime
assert grouprelease.last_seen == datetime
datetime_new = timezone.now() + timedelta(days=1)
grouprelease = GroupRelease.get_or_create(
group=group,
release=release,
environment=env,
datetime=datetime_new,
)
assert grouprelease.first_seen == datetime
assert grouprelease.last_seen == datetime_new
datetime_new2 = datetime_new + timedelta(seconds=1)
# this should not update immediately as the window is too close
grouprelease = GroupRelease.get_or_create(
group=group,
release=release,
environment=env,
datetime=datetime_new2,
)
assert grouprelease.first_seen == datetime
assert grouprelease.last_seen == datetime_new
|
{
"content_hash": "94d63c4629d17027bd61ba18661ae885",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 81,
"avg_line_length": 33.62068965517241,
"alnum_prop": 0.6189743589743589,
"repo_name": "zenefits/sentry",
"id": "96a21f0dfa7dd522bc492696c8c258a078015218",
"size": "1950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/models/test_grouprelease.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "249557"
},
{
"name": "HTML",
"bytes": "293019"
},
{
"name": "JavaScript",
"bytes": "975797"
},
{
"name": "Lua",
"bytes": "22367"
},
{
"name": "Makefile",
"bytes": "5959"
},
{
"name": "Python",
"bytes": "12550461"
},
{
"name": "Ruby",
"bytes": "4026"
},
{
"name": "Shell",
"bytes": "793"
}
],
"symlink_target": ""
}
|
import argparse
import functools
import json
import math
import os
import sys
import urllib.request
# MIN_VERSION is the earliest working version of the updater for self-update
# testing. If a backwards-incompatible change to the updater is made, it may be
# necessary to increase the version.
MIN_VERSION = 962507
def get_platform():
return 'Win'
def int_or_inf(v):
try:
return int(v)
except ValueError:
return -float('inf')
def fetch(platform, minimum, minimum_lexographic):
"""
Queries GCS for versions and returns a tuple (min, max), where min is the
(numerically) lowest version greater than `minimum` returned by GCS, and
    max is the (lexicographically) greatest version returned by GCS. Because
    GCS compares and returns items in lexicographic order, it may return no
    eligible min items. (For example, if minimum = 200, it could return
    30, 31, 32...)
In this case, min will be float('inf') and the caller should query with max
as the new minimum_lexographic.
"""
return functools.reduce(
lambda a, b: (min(a[0], int(b)) if int_or_inf(b) > minimum else a[0],
max(a[1], b)),
map(
lambda s: s[len(platform) + 1:-1],
json.load(
urllib.request.urlopen(
'https://storage.googleapis.com/storage/v1/b/'
'chromium-browser-snapshots/o?prefix=%s%%2F&startOffset=%s'
'%%2F%s&fields=prefixes&delimiter=%%2F' %
(platform, platform,
minimum_lexographic)))['prefixes']),
(float('inf'), ''))
def find(platform, minimum, maximum):
"""
    Returns the lowest version in GCS for `platform` that is at least
    `minimum`, capped at `maximum`. May return maximum even if it does not
    exist in GCS.
"""
found_min = maximum
pivot = str(minimum)
while pivot < str(maximum):
found, pivot = fetch(platform, minimum, pivot)
found_min = min(found_min, found)
return found_min
def lastDatum(platform):
"""
    Returns a version from GCS that is at least 3000 revisions behind
    LAST_CHANGE and quantized down to a multiple of 10000, so the result
    only changes once every 10000 revisions (and never falls below
    MIN_VERSION).
"""
latest = int(
urllib.request.urlopen(
'https://storage.googleapis.com/storage/v1/b/'
'chromium-browser-snapshots/o/Mac%2FLAST_CHANGE?alt=media').read())
min_datum = latest - 3000
min_datum -= min_datum % 10000
return max(MIN_VERSION, find(platform, min_datum, latest))
def print_latest():
print(lastDatum(get_platform()))
def get_url():
print(
json.dumps({
'url': [
'https://storage.googleapis.com/storage/v1/b/'
'chromium-browser-snapshots/o/%s%%2F%s%%2Fupdater.zip?alt=media'
% (get_platform(), os.environ['_3PP_VERSION'])
],
'ext':
'.zip',
'name': ['updater.zip']
}))
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
sub.add_parser('latest').set_defaults(func=lambda _opts: print_latest())
sub.add_parser('get_url').set_defaults(func=lambda _opts: get_url())
opts = ap.parse_args()
return opts.func(opts)
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "c7f1548e56a99fc83937f0b6e69b4903",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 80,
"avg_line_length": 31.18095238095238,
"alnum_prop": 0.6062919975565058,
"repo_name": "scheib/chromium",
"id": "9ae41d441812d111ed1248774b1a1771f5378167",
"size": "3460",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "third_party/updater/chromium_win_x86/3pp/fetch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Run Config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class TaskType(object):
MASTER = 'master'
PS = 'ps'
WORKER = 'worker'
class RunConfig(object):
"""This class specifies the configurations for an `Estimator` run."""
@property
def cluster_spec(self):
return None
@property
def evaluation_master(self):
return ''
@property
def is_chief(self):
return True
@property
def master(self):
return ''
@property
def num_ps_replicas(self):
return 0
@property
def num_worker_replicas(self):
return 1
@property
def task_id(self):
return 0
@property
def task_type(self):
return TaskType.WORKER
@property
def tf_random_seed(self):
return 1
@property
def save_summary_steps(self):
return 100
@property
def save_checkpoints_secs(self):
return 600
@property
def save_checkpoints_steps(self):
return None
@property
def keep_checkpoint_max(self):
return 5
@property
def keep_checkpoint_every_n_hours(self):
return 10000
|
{
"content_hash": "6d325c2a8d52a1d3ab4681a2d6158587",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 71,
"avg_line_length": 15.690140845070422,
"alnum_prop": 0.6642728904847397,
"repo_name": "yaroslavvb/tensorflow",
"id": "3439cd8657f0284a1760751a4f1c2781bde3be31",
"size": "1803",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "tensorflow/python/estimator/run_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7583"
},
{
"name": "C",
"bytes": "171999"
},
{
"name": "C++",
"bytes": "21262959"
},
{
"name": "CMake",
"bytes": "122876"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "775516"
},
{
"name": "HTML",
"bytes": "557007"
},
{
"name": "Java",
"bytes": "271894"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833840"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "36990"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "194247"
},
{
"name": "Python",
"bytes": "17754663"
},
{
"name": "Shell",
"bytes": "320602"
},
{
"name": "TypeScript",
"bytes": "773493"
}
],
"symlink_target": ""
}
|
from pebl import config
#
# Module Parameteres
#
_pcontrollertype = config.StringParameter(
'taskcontroller.type',
'The task controller to use.',
default = 'serial.SerialController'
)
#TODO:test
def fromconfig():
tctype = config.get('taskcontroller.type')
tcmodule,tcclass = tctype.split('.')
mymod = __import__("pebl.taskcontroller.%s" % tcmodule, fromlist=['pebl.taskcontroller'])
mytc = getattr(mymod, tcclass)
return mytc()
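# Example: with taskcontroller.type left at its default of
# 'serial.SerialController', fromconfig() imports pebl.taskcontroller.serial
# and returns an instance of its SerialController class.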
|
{
"content_hash": "998c79a919afc274c47b3d59c27226de",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 93,
"avg_line_length": 20.304347826086957,
"alnum_prop": 0.683083511777302,
"repo_name": "Alwnikrotikz/pebl-project",
"id": "fcc5a501f14a0f47729b47e34f3c2fd96c9e6eac",
"size": "467",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/pebl/taskcontroller/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9560"
},
{
"name": "CSS",
"bytes": "4331"
},
{
"name": "HTML",
"bytes": "4269"
},
{
"name": "JavaScript",
"bytes": "33578"
},
{
"name": "Python",
"bytes": "219403"
}
],
"symlink_target": ""
}
|
"""
MoinMoin - Hitcount Statistics
This macro creates a hitcount chart from the data in "event.log".
TODO: refactor to use a class, this code is ugly.
A lot of code here is duplicated in stats.useragents.
Maybe both can use same base class, maybe some parts are useful to other code.
@copyright: 2002-2004 Juergen Hermann <jh@web.de>,
2007 MoinMoin:ThomasWaldmann
@license: GNU GPL, see COPYING for details.
"""
_debug = 0
import time
from MoinMoin import caching, wikiutil, logfile
from MoinMoin.Page import Page
from MoinMoin.logfile import eventlog
# this is a CONSTANT used for on-disk caching, it must NOT be configurable and
# not depend on request.user!
DATE_FMT = '%04d-%02d-%02d' # % (y, m, d)
def linkto(pagename, request, params=''):
_ = request.getText
if not request.cfg.chart_options:
return text(pagename, request, params)
if _debug:
return draw(pagename, request)
page = Page(request, pagename)
# Create escaped query string from dict and params
querystr = {'action': 'chart', 'type': 'hitcounts'}
querystr = wikiutil.makeQueryString(querystr)
querystr = wikiutil.escape(querystr)
if params:
querystr += '&' + params
data = {'url': page.url(request, querystr)}
data.update(request.cfg.chart_options)
result = ('<img src="%(url)s" width="%(width)d" height="%(height)d"'
' alt="hitcounts chart">') % data
return result
def get_data(pagename, request, filterpage=None):
cache_days, cache_views, cache_edits = [], [], []
cache_date = 0
# Get results from cache
if filterpage:
arena = Page(request, pagename)
cache = caching.CacheEntry(request, arena, 'hitcounts', scope='item', use_pickle=True)
else:
arena = 'charts'
cache = caching.CacheEntry(request, arena, 'hitcounts', scope='wiki', use_pickle=True)
if cache.exists():
try:
cache_date, cache_days, cache_views, cache_edits = cache.content()
except:
cache.remove() # cache gone bad
# Get new results from the log
log = eventlog.EventLog(request)
try:
new_date = log.date()
except logfile.LogMissing:
new_date = None
# prepare data
days = []
views = []
edits = []
ratchet_day = None
ratchet_time = None
if new_date is not None:
log.set_filter(['VIEWPAGE', 'SAVEPAGE'])
latest = None
for event in log.reverse():
# don't use event_log.date()
if latest is None:
latest = event[0]
event_usecs = event[0]
if event_usecs <= cache_date:
break
eventpage = event[2].get('pagename', '')
if filterpage and eventpage != filterpage:
continue
event_secs = wikiutil.version2timestamp(event_usecs)
time_tuple = time.gmtime(event_secs) # must be UTC
day = tuple(time_tuple[0:3])
if day != ratchet_day:
# new day
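                # back-fill any skipped days with zero counts so the
                # series stays contiguous (one entry per UTC day)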
while ratchet_time:
ratchet_time -= 86400 # seconds per day
rday = tuple(time.gmtime(ratchet_time)[0:3]) # must be UTC
if rday <= day:
break
days.append(DATE_FMT % rday)
views.append(0)
edits.append(0)
days.append(DATE_FMT % day)
views.append(0)
edits.append(0)
ratchet_day = day
ratchet_time = event_secs
if event[1] == 'VIEWPAGE':
views[-1] += 1
elif event[1] == 'SAVEPAGE':
edits[-1] += 1
days.reverse()
views.reverse()
edits.reverse()
# merge the day on the end of the cache
if cache_days and days and days[0] == cache_days[-1]:
cache_edits[-1] += edits[0]
cache_views[-1] += views[0]
days, views, edits = days[1:], views[1:], edits[1:]
# Update and save the cache
cache_days.extend(days)
cache_views.extend(views)
cache_edits.extend(edits)
if new_date is not None:
cache.update((latest, cache_days, cache_views, cache_edits))
return cache_days, cache_views, cache_edits
def text(pagename, request, params=''):
from MoinMoin.util.dataset import TupleDataset, Column
from MoinMoin.widget.browser import DataBrowserWidget
_ = request.getText
# check params
filterpage = None
if params.startswith('page='):
filterpage = wikiutil.url_unquote(params[len('page='):])
if request and request.values and 'page' in request.values:
filterpage = request.values['page']
days, views, edits = get_data(pagename, request, filterpage)
hits = TupleDataset()
hits.columns = [Column('day', label=_("Date"), align='left'),
Column('views', label=_("Views/day"), align='right'),
Column('edits', label=_("Edits/day"), align='right'),
]
maxentries = 30
if maxentries < len(days):
step = float(len(days))/ maxentries
else:
step = 1
sv = 0.0
se = 0.0
sd = 0.0
cnt = 0
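    # walk from newest to oldest, averaging views/edits over blocks of
    # roughly `step` days so that at most ~maxentries rows are emitted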
for i in xrange(len(days)-1, -1, -1):
d, v, e = days[i], views[i], edits[i]
# sum up views and edits to step days
sd += 1
cnt += 1
sv += v
se += e
if cnt >= step:
cnt -= step
hits.addRow((d, "%.1f" % (sv/sd), "%.1f" % (se/sd)))
sv = 0.0
se = 0.0
sd = 0.0
table = DataBrowserWidget(request)
table.setData(hits)
return table.render(method="GET")
def draw(pagename, request):
import shutil, cStringIO
from MoinMoin.stats.chart import Chart, ChartData, Color
_ = request.getText
# check params
filterpage = None
if request and request.values and 'page' in request.values:
filterpage = request.values['page']
days, views, edits = get_data(pagename, request, filterpage)
import math
try:
scalefactor = float(max(views))/max(edits)
except (ZeroDivisionError, ValueError):
scalefactor = 1.0
else:
scalefactor = int(10 ** math.floor(math.log10(scalefactor)))
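        # e.g. max(views)=500, max(edits)=7 -> ratio ~71.4 -> scalefactor 10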
# scale edits up
edits = [x * scalefactor for x in edits]
# create image
image = cStringIO.StringIO()
c = Chart()
c.addData(ChartData(views, color='green'))
c.addData(ChartData(edits, color='red'))
chart_title = ''
if request.cfg.sitename:
chart_title = "%s: " % request.cfg.sitename
chart_title = chart_title + _('Page hits and edits')
if filterpage:
chart_title = _("%(chart_title)s for %(filterpage)s") % {
'chart_title': chart_title,
'filterpage': filterpage,
}
chart_title = "%s\n%sx%d" % (chart_title, _("green=view\nred=edit"), scalefactor)
c.option(
title=chart_title.encode('iso-8859-1', 'replace'), # gdchart can't do utf-8
xtitle=(_('date') + ' (Server)').encode('iso-8859-1', 'replace'),
ytitle=_('# of hits').encode('iso-8859-1', 'replace'),
title_font=c.GDC_GIANT,
#thumblabel = 'THUMB', thumbnail = 1, thumbval = 10,
#ytitle_color = Color('green'),
#yaxis2 = 1,
#ytitle2 = '# of edits',
#ytitle2_color = Color('red'),
#ylabel2_color = Color('black'),
#interpolations = 0,
threed_depth=1.0,
requested_yinterval=1.0,
stack_type=c.GDC_STACK_BESIDE
)
c.draw(c.GDC_LINE,
(request.cfg.chart_options['width'], request.cfg.chart_options['height']),
image, days)
request.content_type = 'image/gif'
request.content_length = len(image.getvalue())
# copy the image
image.reset()
shutil.copyfileobj(image, request, 8192)
|
{
"content_hash": "32ba521a43e08856332733d1a3886e95",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 94,
"avg_line_length": 30.802325581395348,
"alnum_prop": 0.5710330942494023,
"repo_name": "RealTimeWeb/wikisite",
"id": "bb27fb0075687332bfdeb26cea133baca2ae5977",
"size": "7976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MoinMoin/stats/hitcounts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "49395"
},
{
"name": "CSS",
"bytes": "204104"
},
{
"name": "ColdFusion",
"bytes": "142312"
},
{
"name": "Java",
"bytes": "491798"
},
{
"name": "JavaScript",
"bytes": "2107106"
},
{
"name": "Lasso",
"bytes": "23464"
},
{
"name": "Makefile",
"bytes": "4950"
},
{
"name": "PHP",
"bytes": "144585"
},
{
"name": "Perl",
"bytes": "44627"
},
{
"name": "Python",
"bytes": "7647140"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
}
|
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationtacacspolicy_authenticationvserver_binding(base_resource) :
""" Binding class showing the authenticationvserver that can be bound to authenticationtacacspolicy.
"""
def __init__(self) :
self._boundto = ""
self._priority = 0
self._activepolicy = 0
self._name = ""
self.___count = 0
@property
def name(self) :
"""Name of the TACACS+ policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the TACACS+ policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def boundto(self) :
"""The entity name to which policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
"""The entity name to which policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@property
def activepolicy(self) :
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(authenticationtacacspolicy_authenticationvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.authenticationtacacspolicy_authenticationvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch authenticationtacacspolicy_authenticationvserver_binding resources.
"""
try :
obj = authenticationtacacspolicy_authenticationvserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of authenticationtacacspolicy_authenticationvserver_binding resources.
	Filter string should be in JSON format. eg: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationtacacspolicy_authenticationvserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count authenticationtacacspolicy_authenticationvserver_binding resources configued on NetScaler.
"""
try :
obj = authenticationtacacspolicy_authenticationvserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of authenticationtacacspolicy_authenticationvserver_binding resources.
	Filter string should be in JSON format. eg: "port:80,servicetype:HTTP".
"""
try :
obj = authenticationtacacspolicy_authenticationvserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class authenticationtacacspolicy_authenticationvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.authenticationtacacspolicy_authenticationvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.authenticationtacacspolicy_authenticationvserver_binding = [authenticationtacacspolicy_authenticationvserver_binding() for _ in range(length)]
|
{
"content_hash": "c43461aee26b61e63b90f4956b8fc904",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 158,
"avg_line_length": 29.5,
"alnum_prop": 0.7173779865223606,
"repo_name": "mahabs/nitro",
"id": "0127db10ebf7a9b18901e49147e94676b6ee3380",
"size": "5511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/authentication/authenticationtacacspolicy_authenticationvserver_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "10647176"
}
],
"symlink_target": ""
}
|
"""Test namedtuple attributes.
Regression test for:
https://bitbucket.org/logilab/pylint/issue/93/pylint-crashes-on-namedtuple-attribute
"""
from __future__ import absolute_import, print_function
__revision__ = None
from collections import namedtuple
Thing = namedtuple('Thing', ())
Fantastic = namedtuple('Fantastic', ['foo'])
def test():
"""Test member access in named tuples."""
print(Thing.x) # [no-member]
fan = Fantastic(1)
print(fan.foo)
# Should not raise protected-access.
fan2 = fan._replace(foo=2)
# This is a bug.
print(fan2.foo) # [no-member]
|
{
"content_hash": "bc9911c36a514790b902ea7f2775782a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 84,
"avg_line_length": 25.869565217391305,
"alnum_prop": 0.680672268907563,
"repo_name": "willemneal/Docky",
"id": "7283db2072d2669f0020471d6cfa13077ef627e0",
"size": "595",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/pylint/test/functional/namedtuple_member_inference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "636"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "5145"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "3168"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "4528"
},
{
"name": "BlitzBasic",
"bytes": "1730"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "109073"
},
{
"name": "C#",
"bytes": "17784"
},
{
"name": "C++",
"bytes": "79372"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "26952"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "2878"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "91743"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "5475"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "5709"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "27879"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "15760"
},
{
"name": "Gnuplot",
"bytes": "10376"
},
{
"name": "Go",
"bytes": "172"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "81613"
},
{
"name": "JavaScript",
"bytes": "14143"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "306"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "76274"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "3385"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "318"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "17354"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "84519"
},
{
"name": "Perl",
"bytes": "3611"
},
{
"name": "Perl6",
"bytes": "49676"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PowerShell",
"bytes": "6932"
},
{
"name": "Prolog",
"bytes": "738"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "6272729"
},
{
"name": "R",
"bytes": "4057"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "1887"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "47137"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "121510"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smalltalk",
"bytes": "156665"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "16922"
},
{
"name": "Visual Basic",
"bytes": "17210"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XSLT",
"bytes": "755"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "26388"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
}
|
"""
This file is a basic script that will determine if a ball would score if it were to be catapulted immediately.
We are using the LED strips around the target to find it.
Flowchart:
-Get and flip image
-Count pixels within hsv range
-Average their y-coordinates
-Print a hit message if that average is in range, a miss if not
"""
import ConfigParser
import time
import numpy as np
import cv2
# parse config values
config = ConfigParser.RawConfigParser()
config.read("../vision.conf")
exposure = int(config.get('camera','exposure'))
height = int(config.get('camera','height'))
width = int(config.get('camera','width'))
hue_lower = int(config.get('shooter','hue_lower'))
hue_upper = int(config.get('shooter','hue_upper'))
saturation_lower = int(config.get('shooter','saturation_lower'))
saturation_upper = int(config.get('shooter','saturation_upper'))
value_lower = int(config.get('shooter','value_lower'))
value_upper = int(config.get('shooter','value_upper'))
lower_avg = int(config.get('shooter','lower_avg'))
upper_avg = int(config.get('shooter','upper_avg'))
min_pixel_weight = int(config.get('shooter','min_pixel_weight'))
camera = cv2.VideoCapture(0)
camera.set(cv2.cv.CV_CAP_PROP_EXPOSURE,exposure) #time in milliseconds. 5 gives dark image. 100 gives bright image.
camera.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH,width)
camera.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT,height)
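# property ids 3 and 4 are CV_CAP_PROP_FRAME_WIDTH/CV_CAP_PROP_FRAME_HEIGHT;
# printing them confirms whether the driver accepted the requested size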
print camera.get(3),camera.get(4)
try:
while True:
_,capture = camera.read()
capture = cv2.flip(capture,1)
hsvcapture = cv2.cvtColor(capture,cv2.COLOR_BGR2HSV)
inrangepixels = cv2.inRange(hsvcapture,np.array((hue_lower,saturation_lower,value_lower)),np.array((hue_upper,saturation_upper,value_upper)))#in opencv, HSV is 0-180,0-255,0-255
try:
#This averaging math is explained in ./math.png
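            # A sketch of that math (weights hypothetical): row_averages[r] is the
            # mean mask intensity of row r, so weighting the row indices by
            # row_averages gives the y-coordinate of the intensity centroid, e.g.
            # weights [0, 255, 255, 0] -> (1*255 + 2*255) / (255 + 255) = 1.5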
row_averages = np.mean(inrangepixels, axis=1)
avgheight=int(np.average(range(0,height), weights=row_averages, axis=0))
except ZeroDivisionError:
avgheight=0
if(np.sum(row_averages, axis=0)<min_pixel_weight):
avgheight=0
cv2.line(capture,(0,avgheight),(width,avgheight),(255,0,0),5)
cv2.imshow("capture",capture)
cv2.imshow("inrangepixels",inrangepixels)
cv2.waitKey(1)
#raise Exception("")
except KeyboardInterrupt as e:
print "done"
print e
cv2.destroyAllWindows()
camera.release()
|
{
"content_hash": "ea53c84c5453628bcd0395b0757cd4c6",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 185,
"avg_line_length": 39.53968253968254,
"alnum_prop": 0.6732236049779206,
"repo_name": "FRC2914/aerial-assist",
"id": "d773690b5e0b237056b989aff123109d7376541b",
"size": "2491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python vision stuff/mshooting/shooter.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Java",
"bytes": "39470"
},
{
"name": "Python",
"bytes": "55493"
}
],
"symlink_target": ""
}
|
from django.conf import settings
NORMALISE_TO_UPPER = getattr(settings, 'ADB_NORMALISE_TO_UPPER', True)
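# Projects can override this from their own settings module, e.g.:
#
#     ADB_NORMALISE_TO_UPPER = False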
|
{
"content_hash": "5f46f636aa82781a1e07a0d77327e975",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 70,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.7884615384615384,
"repo_name": "7wonders/django-addresses",
"id": "c18f6f271e6a6f5dcd564dbd5b00bcd17672a06d",
"size": "104",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "addressbook/conf/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "11293"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division
import os
import threading
import numpy as np
from model.base.document import Document
from model.config import BATCH_SIZE, SAMPLE_LENGTH
from model.utils import get_answers_for_doc
def get_data_for_model(train_dir, labels, test_dir=None, nn_model=None,
as_generator=False, batch_size=BATCH_SIZE,
word2vec_model=None, scaler=None):
"""
Get data in the form of matrices or generators for both train and test sets.
:param train_dir: directory with train files
:param labels: an iterable of predefined labels (controlled vocabulary)
:param test_dir: directory with test files
:param nn_model: Keras model of the NN
:param as_generator: flag whether to return a generator or in-memory matrix
:param batch_size: integer, size of the batch
:param word2vec_model: trained w2v gensim model
:param scaler: scaling object for X matrix normalisation e.g. StandardScaler
:return: tuple with 2 elements for train and test data. Each element can be
either a pair of matrices (X, y) or their generator
"""
kwargs = dict(
label_indices={lab: i for i, lab in enumerate(labels)},
word2vec_model=word2vec_model,
scaler=scaler,
nn_model=nn_model,
)
if as_generator:
filename_it = FilenameIterator(train_dir, batch_size)
train_data = iterate_over_batches(filename_it, **kwargs)
else:
"""
train_files is a set for the name of all txt in train_dir
"""
train_files = {filename[:-4] for filename in os.listdir(train_dir) if '.DS_S' not in filename}
train_data = build_x_and_y(train_files, train_dir, **kwargs)
test_data = None
if test_dir:
test_files = {filename[:-4] for filename in os.listdir(test_dir) if '.DS_S' not in filename}
test_data = build_x_and_y(test_files, test_dir, **kwargs)
return train_data, test_data
def build_x_and_y(filenames, file_directory, **kwargs):
"""
Given file names and their directory, build (X, y) data matrices
:param filenames: iterable of strings showing file ids (no extension)
:param file_directory: path to a directory where those files lie
:param kwargs: additional necessary data for matrix building e.g. scaler
:return: a tuple (X, y)
"""
label_indices = kwargs['label_indices']
word2vec_model = kwargs['word2vec_model']
scaler = kwargs['scaler']
nn_model = kwargs['nn_model']
x_matrix = np.zeros((len(filenames), SAMPLE_LENGTH, word2vec_model.vector_size))
y_matrix = np.zeros((len(filenames), len(label_indices)), dtype=np.bool_)
for doc_id, fname in enumerate(filenames):
doc = Document(doc_id, os.path.join(file_directory, fname + '.txt'))
words = doc.get_all_words()[:SAMPLE_LENGTH]
for i, w in enumerate(words):
if w in word2vec_model:
word_vector = word2vec_model[w].reshape(1, -1)
x_matrix[doc_id][i] = scaler.transform(word_vector, copy=True)[0]
labels = get_answers_for_doc(
fname + '.txt',
file_directory,
filtered_by=set(label_indices.keys()),
)
for lab in labels:
index = label_indices[lab]
y_matrix[doc_id][index] = True
if nn_model and type(nn_model.input) == list:
return [x_matrix] * len(nn_model.input), y_matrix
else:
return [x_matrix], y_matrix
def iterate_over_batches(filename_it, **kwargs):
"""
Iterate infinitely over a given filename iterator
:param filename_it: FilenameIterator object
:param kwargs: additional necessary data for matrix building e.g. scaler
:return: yields tuples (X, y) when called
"""
while True:
files = filename_it.next()
yield build_x_and_y(files, filename_it.dirname, **kwargs)
class FilenameIterator(object):
""" A threadsafe iterator yielding a fixed number of filenames from a given
folder and looping forever. Can be used for external memory training. """
def __init__(self, dirname, batch_size):
self.dirname = dirname
self.batch_size = batch_size
self.lock = threading.Lock()
self.files = list({filename[:-4] for filename in os.listdir(dirname)})
self.i = 0
def __iter__(self):
return self
def next(self):
with self.lock:
if self.i == len(self.files):
self.i = 0
batch = self.files[self.i:self.i + self.batch_size]
if len(batch) < self.batch_size:
self.i = 0
else:
self.i += self.batch_size
return batch
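# A minimal usage sketch (the `w2v` gensim model and fitted `scaler` below are
# hypothetical, not defined in this module):
#
#     train_gen, _ = get_data_for_model(
#         'data/train', labels=['math', 'cs', 'bio'],
#         as_generator=True, batch_size=BATCH_SIZE,
#         word2vec_model=w2v, scaler=scaler,
#     )
#     x_batch, y_batch = next(train_gen)  # one (X, y) pair per call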
|
{
"content_hash": "c55c862cb0f66fbacd2c2fbc37e16abd",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 102,
"avg_line_length": 35.15555555555556,
"alnum_prop": 0.6310577328276443,
"repo_name": "HeadCow/ARPS",
"id": "7714143ad68c3e2ccb919095b3aa92774a2110f8",
"size": "4769",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ML_model/model/nn/input_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "611954"
},
{
"name": "Shell",
"bytes": "168"
}
],
"symlink_target": ""
}
|
import networkx as nx
import matplotlib.pyplot as plt
from nltk.corpus import wordnet as wn
def traverse(graph, start, node):
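    # Note: this assumes the NLTK-book-era API where Synset.name is an
    # attribute; on NLTK >= 3.0 it is a method, so node.name() would be
    # needed instead.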
graph.depth[node.name] = node.shortest_path_distance(start)
for child in node.hyponyms():
graph.add_edge(node.name, child.name)
traverse(graph, start, child)
def hyponym_graph(start):
G = nx.Graph()
G.depth = {}
traverse(G, start, start)
return G
def graph_draw(graph):
nx.draw(graph,
node_size=[16 * graph.degree(n) for n in graph],
node_color=[graph.depth[n] for n in graph],
with_labels=False)
plt.show()
def main():
dog = wn.synset('dog.n.01')
graph = hyponym_graph(dog)
print('drawing...')
graph_draw(graph)
if __name__ == '__main__':
main()
|
{
"content_hash": "4f5223c6e84647e9b223dca444e823d8",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 63,
"avg_line_length": 21.86111111111111,
"alnum_prop": 0.6099110546378653,
"repo_name": "luoshao23/ML_algorithm",
"id": "58cb8d943c1057ba4f557218f8839728c544b76b",
"size": "787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NLP/wordnet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4514"
},
{
"name": "C++",
"bytes": "3203"
},
{
"name": "Jupyter Notebook",
"bytes": "1542804"
},
{
"name": "MATLAB",
"bytes": "119208"
},
{
"name": "Makefile",
"bytes": "85"
},
{
"name": "Python",
"bytes": "370897"
}
],
"symlink_target": ""
}
|
"""Models for permissions."""
from typing import TYPE_CHECKING
import attr
if TYPE_CHECKING:
# pylint: disable=unused-import
from homeassistant.helpers import entity_registry as ent_reg # noqa: F401
from homeassistant.helpers import device_registry as dev_reg # noqa: F401
@attr.s(slots=True)
class PermissionLookup:
"""Class to hold data for permission lookups."""
entity_registry: "ent_reg.EntityRegistry" = attr.ib()
device_registry: "dev_reg.DeviceRegistry" = attr.ib()
|
{
"content_hash": "895ae0e5e00ba17dd14e448f2812f391",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 78,
"avg_line_length": 29.705882352941178,
"alnum_prop": 0.7267326732673267,
"repo_name": "pschmitt/home-assistant",
"id": "435d5f2e9823d5cc60dc90a93817a8fa507b2427",
"size": "505",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/auth/permissions/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1522"
},
{
"name": "Python",
"bytes": "24807200"
},
{
"name": "Shell",
"bytes": "4342"
}
],
"symlink_target": ""
}
|
"""
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
import copy
import functools
import inspect
import warnings
from collections import namedtuple
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db.models.constants import LOOKUP_SEP
from django.utils import tree
from django.utils.deprecation import RemovedInDjango40Warning
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation). The join_field is the field backing the relation.
PathInfo = namedtuple('PathInfo', 'from_opts to_opts target_fields join_field m2m direct filtered_relation')
class InvalidQueryType(type):
@property
def _subclasses(self):
return (FieldDoesNotExist, FieldError)
def __warn(self):
warnings.warn(
'The InvalidQuery exception class is deprecated. Use '
'FieldDoesNotExist or FieldError instead.',
category=RemovedInDjango40Warning,
stacklevel=4,
)
def __instancecheck__(self, instance):
self.__warn()
return isinstance(instance, self._subclasses) or super().__instancecheck__(instance)
def __subclasscheck__(self, subclass):
self.__warn()
return issubclass(subclass, self._subclasses) or super().__subclasscheck__(subclass)
class InvalidQuery(Exception, metaclass=InvalidQueryType):
pass
def subclasses(cls):
yield cls
for subclass in cls.__subclasses__():
yield from subclasses(subclass)
class Q(tree.Node):
"""
Encapsulate filters as objects that can then be combined logically (using
`&` and `|`).
"""
# Connection types
AND = 'AND'
OR = 'OR'
default = AND
conditional = True
def __init__(self, *args, _connector=None, _negated=False, **kwargs):
super().__init__(children=[*args, *sorted(kwargs.items())], connector=_connector, negated=_negated)
def _combine(self, other, conn):
if not isinstance(other, Q):
raise TypeError(other)
# If the other Q() is empty, ignore it and just use `self`.
if not other:
return copy.deepcopy(self)
# Or if this Q is empty, ignore it and just use `other`.
elif not self:
return copy.deepcopy(other)
obj = type(self)()
obj.connector = conn
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __invert__(self):
obj = type(self)()
obj.add(self, self.AND)
obj.negate()
return obj
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# We must promote any new joins to left outer joins so that when Q is
# used as an expression, rows aren't filtered due to joins.
clause, joins = query._add_q(
self, reuse, allow_joins=allow_joins, split_subq=False,
check_filterable=False,
)
query.promote_joins(joins)
return clause
def deconstruct(self):
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
if path.startswith('django.db.models.query_utils'):
path = path.replace('django.db.models.query_utils', 'django.db.models')
args, kwargs = (), {}
if len(self.children) == 1 and not isinstance(self.children[0], Q):
child = self.children[0]
kwargs = {child[0]: child[1]}
else:
args = tuple(self.children)
if self.connector != self.default:
kwargs = {'_connector': self.connector}
if self.negated:
kwargs['_negated'] = True
return path, args, kwargs
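# A minimal sketch of combining Q objects (the Entry model is hypothetical):
#
#     q = Q(status='draft') | ~Q(author__isnull=True)
#     Entry.objects.filter(q)
#
# The `|` call runs Q._combine(other, Q.OR); negation goes through
# __invert__, which wraps the node and sets negated=True.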
class DeferredAttribute:
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field):
self.field = field
def __get__(self, instance, cls=None):
"""
        Retrieve and cache the value from the datastore on the first lookup.
Return the cached value.
"""
if instance is None:
return self
data = instance.__dict__
field_name = self.field.attname
if field_name not in data:
# Let's see if the field is part of the parent chain. If so we
# might be able to reuse the already loaded value. Refs #18343.
val = self._check_parent_chain(instance)
if val is None:
instance.refresh_from_db(fields=[field_name])
else:
data[field_name] = val
return data[field_name]
def _check_parent_chain(self, instance):
"""
Check if the field value can be fetched from a parent field already
loaded in the instance. This can be done if the to-be fetched
field is a primary key field.
"""
opts = instance._meta
link_field = opts.get_ancestor_link(self.field.model)
if self.field.primary_key and self.field != link_field:
return getattr(instance, link_field.attname)
return None
class RegisterLookupMixin:
@classmethod
def _get_lookup(cls, lookup_name):
return cls.get_lookups().get(lookup_name, None)
@classmethod
@functools.lru_cache(maxsize=None)
def get_lookups(cls):
class_lookups = [parent.__dict__.get('class_lookups', {}) for parent in inspect.getmro(cls)]
return cls.merge_dicts(class_lookups)
def get_lookup(self, lookup_name):
from django.db.models.lookups import Lookup
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_lookup(lookup_name)
if found is not None and not issubclass(found, Lookup):
return None
return found
def get_transform(self, lookup_name):
from django.db.models.lookups import Transform
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_transform(lookup_name)
if found is not None and not issubclass(found, Transform):
return None
return found
@staticmethod
def merge_dicts(dicts):
"""
        Merge dicts in reverse so that the order of the original list takes
        precedence, e.g. merge_dicts([a, b]) prefers keys in 'a' over 'b'.
"""
merged = {}
for d in reversed(dicts):
merged.update(d)
return merged
@classmethod
def _clear_cached_lookups(cls):
for subclass in subclasses(cls):
subclass.get_lookups.cache_clear()
@classmethod
def register_lookup(cls, lookup, lookup_name=None):
if lookup_name is None:
lookup_name = lookup.lookup_name
if 'class_lookups' not in cls.__dict__:
cls.class_lookups = {}
cls.class_lookups[lookup_name] = lookup
cls._clear_cached_lookups()
return lookup
@classmethod
def _unregister_lookup(cls, lookup, lookup_name=None):
"""
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name]
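# A minimal sketch of registering a custom lookup via RegisterLookupMixin
# (NotEqual is illustrative, not part of this module):
#
#     class NotEqual(models.Lookup):
#         lookup_name = 'ne'
#
#         def as_sql(self, compiler, connection):
#             lhs, lhs_params = self.process_lhs(compiler, connection)
#             rhs, rhs_params = self.process_rhs(compiler, connection)
#             return '%s <> %s' % (lhs, rhs), lhs_params + rhs_params
#
#     models.Field.register_lookup(NotEqual)  # enables .filter(name__ne='x')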
def select_related_descend(field, restricted, requested, load_fields, reverse=False):
"""
Return True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_klass_info()).
Arguments:
* field - the field to be checked
     * restricted - a boolean, indicating if the field list has been
       manually restricted using a requested clause
* requested - The select_related() dictionary.
* load_fields - the set of fields to be loaded on this model
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.remote_field:
return False
if field.remote_field.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
if load_fields:
if field.attname not in load_fields:
if restricted and field.name in requested:
msg = (
'Field %s.%s cannot be both deferred and traversed using '
'select_related at the same time.'
) % (field.model._meta.object_name, field.name)
raise FieldError(msg)
return True
def refs_expression(lookup_parts, annotations):
"""
Check if the lookup_parts contains references to the given annotations set.
Because the LOOKUP_SEP is contained in the default annotation names, check
each prefix of the lookup_parts for a match.
"""
for n in range(1, len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in annotations and annotations[level_n_lookup]:
return annotations[level_n_lookup], lookup_parts[n:]
return False, ()
def check_rel_lookup_compatibility(model, target_opts, field):
"""
    Check that model is compatible with target_opts. Compatibility
is OK if:
1) model and opts match (where proxy inheritance is removed)
2) model is parent of opts' model or the other way around
"""
def check(opts):
return (
model._meta.concrete_model == opts.concrete_model or
opts.concrete_model in model._meta.get_parent_list() or
model in opts.get_parent_list()
)
# If the field is a primary key, then doing a query against the field's
# model is ok, too. Consider the case:
# class Restaurant(models.Model):
# place = OneToOneField(Place, primary_key=True):
# Restaurant.objects.filter(pk__in=Restaurant.objects.all()).
# If we didn't have the primary key check, then pk__in (== place__in) would
# give Place's opts as the target opts, but Restaurant isn't compatible
# with that. This logic applies only to primary keys, as when doing __in=qs,
# we are going to turn this into __in=qs.values('pk') later on.
return (
check(target_opts) or
(getattr(field, 'primary_key', False) and check(field.model._meta))
)
class FilteredRelation:
"""Specify custom filtering in the ON clause of SQL joins."""
def __init__(self, relation_name, *, condition=Q()):
if not relation_name:
raise ValueError('relation_name cannot be empty.')
self.relation_name = relation_name
self.alias = None
if not isinstance(condition, Q):
raise ValueError('condition argument must be a Q() instance.')
self.condition = condition
self.path = []
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return (
self.relation_name == other.relation_name and
self.alias == other.alias and
self.condition == other.condition
)
def clone(self):
clone = FilteredRelation(self.relation_name, condition=self.condition)
clone.alias = self.alias
clone.path = self.path[:]
return clone
def resolve_expression(self, *args, **kwargs):
"""
QuerySet.annotate() only accepts expression-like arguments
(with a resolve_expression() method).
"""
raise NotImplementedError('FilteredRelation.resolve_expression() is unused.')
def as_sql(self, compiler, connection):
# Resolve the condition in Join.filtered_relation.
query = compiler.query
where = query.build_filtered_relation_q(self.condition, reuse=set(self.path))
return compiler.compile(where)
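# A minimal sketch of FilteredRelation in a query (Book/pages are
# hypothetical models):
#
#     Book.objects.annotate(
#         recent=FilteredRelation('pages', condition=Q(pages__year__gte=2020)),
#     ).filter(recent__number__gt=100)
#
# The condition lands in the JOIN's ON clause via as_sql() above, rather than
# in the WHERE clause that a plain filter() would produce.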
|
{
"content_hash": "a284c48925b60ff7d7b4bb9add158815",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 108,
"avg_line_length": 35.82905982905983,
"alnum_prop": 0.6285782442748091,
"repo_name": "googleinterns/django",
"id": "f7c6d74e728a3f6e6b34c917099ac288f5aff540",
"size": "12576",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/db/models/query_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "79183"
},
{
"name": "HTML",
"bytes": "228941"
},
{
"name": "JavaScript",
"bytes": "136792"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "14076970"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
}
|
import os
APP_PATH = os.path.join(*os.path.split(os.path.dirname(os.path.realpath(__file__)))[:-1])
import sys
if APP_PATH not in sys.path:
sys.path.append(APP_PATH)
import PowerShell
import MyTrack
import Phonebook
import Log
LOGGER = Log.MyLog(name=__name__)
def ce500(instructor, trainees):
instrc = "".join([c for c in instructor if c.isdigit()])
if instrc:
if not PowerShell.cleanup('01', instrc):
LOGGER.error("Failed to clean-up: CE500 instructor Interconnect still connected to epic-trn%s" % instrc)
return False
        if not MyTrack.unassign("Instructors", "train01"):
            LOGGER.error("Failed to save change to database: CE500 instructor Interconnect still taken by epic-trn%s"
                         % instrc)
            return False
if not Phonebook.TrnPhonebook().reset():
LOGGER.error("Training Phonebook not reset")
return False
if not unassign_interconnects("CE500", trainees):
return False
return True
def funds(trainees):
if not unassign_interconnects("AMB_IP", trainees):
return False
return True
# # # #
def unassign_interconnects(_class, trns):
"""
:param _class: <string>
:param trns: <list(tuple)> [(interconnect, cache)]
:return:
"""
total_success = True
for trn in trns:
this_success = True
if trn:
interconnect = "".join([s for s in trn[0] if s.isdigit()])
cache = "".join([s for s in trn[1] if s.isdigit()])
if not PowerShell.cleanup(interconnect, cache):
LOGGER.error("Failed clean-up: train%s still connected to epic-trn%s" % (interconnect, cache))
total_success = False
this_success = False
if not MyTrack.unassign(_class, "train"+interconnect):
LOGGER.error("Failed to save change to database: train%s still taken by epic-trn%s" % (interconnect, cache))
total_success = False
this_success = False
if this_success:
LOGGER.info("epic-trn%s successfully cleaned up" % cache)
return total_success
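# A minimal sketch (interconnect/cache values hypothetical):
#
#     unassign_interconnects("CE500", [("train05", "epic-trn12")])
#
# strips the digits from each pair before the PowerShell and MyTrack calls.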
# # # #
if __name__ == "__main__":
import datetime
yesterday = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime("%m/%d/%Y") # MM/DD/YYYY
print("Cleaning up classes from %s:" % yesterday)
classes = MyTrack.cleanup_schedule(yesterday)
funds([MyTrack.get_funds("epic-trn"+_class[0]) for _class in classes])
for _class in classes:
print("\t%s" % _class[0])
|
{
"content_hash": "690d80449db3d932c8dd014f7f23d39f",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 124,
"avg_line_length": 29.352272727272727,
"alnum_prop": 0.6035617499032133,
"repo_name": "jabez007/Training_Helpyr",
"id": "eae700a39b20814986dfef2fb88f3d73013ca878",
"size": "2583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Cleanup/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6585"
},
{
"name": "PowerShell",
"bytes": "11308"
},
{
"name": "Python",
"bytes": "84570"
}
],
"symlink_target": ""
}
|
"""product and picture
Revision ID: 164e05aa3e84
Revises: 2e4fb5b3a97
Create Date: 2015-01-16 11:54:09.927322
"""
# revision identifiers, used by Alembic.
revision = '164e05aa3e84'
down_revision = '2e4fb5b3a97'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('picture',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('width', sa.Integer(), nullable=True),
sa.Column('height', sa.Integer(), nullable=True),
sa.Column('filename', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('product',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=True),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('instock', sa.Boolean(), nullable=True),
sa.Column('price', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('product_pics',
sa.Column('product_id', sa.Integer(), nullable=True),
sa.Column('picture_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['picture_id'], ['picture.id'], ),
sa.ForeignKeyConstraint(['product_id'], ['product.id'], )
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('product_pics')
op.drop_table('product')
op.drop_table('picture')
### end Alembic commands ###
|
{
"content_hash": "5ef34b269f4d92c60f4f38b44732c449",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 64,
"avg_line_length": 32.5,
"alnum_prop": 0.6603076923076923,
"repo_name": "zknight/booster",
"id": "567df672983acd48b0a60e5a63c5921c54cc663a",
"size": "1625",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/164e05aa3e84_product_and_picture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8304"
},
{
"name": "PHP",
"bytes": "38"
},
{
"name": "Python",
"bytes": "37825"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import socket
import tempfile
from datetime import datetime, timedelta
import pytest
from case import ANY, ContextMock, MagicMock, Mock, patch
from kombu import Queue
from celery import Task, group, uuid
from celery.app.task import _reprtask
from celery.exceptions import Ignore, ImproperlyConfigured, Retry
from celery.five import items, range, string_t
from celery.result import EagerResult
from celery.task.base import Task as OldTask
from celery.utils.time import parse_iso8601
try:
from urllib.error import HTTPError
except ImportError: # pragma: no cover
from urllib2 import HTTPError
def return_True(*args, **kwargs):
# Task run functions can't be closures/lambdas, as they're pickled.
return True
class MockApplyTask(Task):
abstract = True
applied = 0
def run(self, x, y):
return x * y
def apply_async(self, *args, **kwargs):
self.applied += 1
class TasksCase:
def setup(self):
self.mytask = self.app.task(shared=False)(return_True)
@self.app.task(bind=True, count=0, shared=False)
def increment_counter(self, increment_by=1):
self.count += increment_by or 1
return self.count
self.increment_counter = increment_counter
@self.app.task(shared=False)
def raising():
raise KeyError('foo')
self.raising = raising
@self.app.task(bind=True, max_retries=3, iterations=0, shared=False)
def retry_task(self, arg1, arg2, kwarg=1, max_retries=None, care=True):
self.iterations += 1
rmax = self.max_retries if max_retries is None else max_retries
assert repr(self.request)
retries = self.request.retries
if care and retries >= rmax:
return arg1
else:
raise self.retry(countdown=0, max_retries=rmax)
self.retry_task = retry_task
@self.app.task(bind=True, max_retries=3, iterations=0, shared=False)
def retry_task_noargs(self, **kwargs):
self.iterations += 1
if self.request.retries >= 3:
return 42
else:
raise self.retry(countdown=0)
self.retry_task_noargs = retry_task_noargs
@self.app.task(bind=True, max_retries=3, iterations=0,
base=MockApplyTask, shared=False)
def retry_task_mockapply(self, arg1, arg2, kwarg=1):
self.iterations += 1
retries = self.request.retries
if retries >= 3:
return arg1
raise self.retry(countdown=0)
self.retry_task_mockapply = retry_task_mockapply
@self.app.task(bind=True, max_retries=3, iterations=0, shared=False)
def retry_task_customexc(self, arg1, arg2, kwarg=1, **kwargs):
self.iterations += 1
retries = self.request.retries
if retries >= 3:
return arg1 + kwarg
else:
try:
raise MyCustomException('Elaine Marie Benes')
except MyCustomException as exc:
kwargs.update(kwarg=kwarg)
raise self.retry(countdown=0, exc=exc)
self.retry_task_customexc = retry_task_customexc
@self.app.task(bind=True, autoretry_for=(ZeroDivisionError,),
shared=False)
def autoretry_task_no_kwargs(self, a, b):
self.iterations += 1
return a / b
self.autoretry_task_no_kwargs = autoretry_task_no_kwargs
@self.app.task(bind=True, autoretry_for=(ZeroDivisionError,),
retry_kwargs={'max_retries': 5}, shared=False)
def autoretry_task(self, a, b):
self.iterations += 1
return a / b
self.autoretry_task = autoretry_task
@self.app.task(bind=True, autoretry_for=(HTTPError,),
retry_backoff=True, shared=False)
def autoretry_backoff_task(self, url):
self.iterations += 1
if "error" in url:
fp = tempfile.TemporaryFile()
raise HTTPError(url, '500', 'Error', '', fp)
return url
self.autoretry_backoff_task = autoretry_backoff_task
@self.app.task(bind=True, autoretry_for=(HTTPError,),
retry_backoff=True, retry_jitter=True, shared=False)
def autoretry_backoff_jitter_task(self, url):
self.iterations += 1
if "error" in url:
fp = tempfile.TemporaryFile()
raise HTTPError(url, '500', 'Error', '', fp)
return url
self.autoretry_backoff_jitter_task = autoretry_backoff_jitter_task
@self.app.task(bind=True)
def task_check_request_context(self):
assert self.request.hostname == socket.gethostname()
self.task_check_request_context = task_check_request_context
@self.app.task(ignore_result=True)
def task_with_ignored_result():
pass
self.task_with_ignored_result = task_with_ignored_result
# Remove all messages from memory-transport
from kombu.transport.memory import Channel
Channel.queues.clear()
class MyCustomException(Exception):
"""Random custom exception."""
class test_task_retries(TasksCase):
def test_retry(self):
self.retry_task.max_retries = 3
self.retry_task.iterations = 0
self.retry_task.apply([0xFF, 0xFFFF])
assert self.retry_task.iterations == 4
self.retry_task.max_retries = 3
self.retry_task.iterations = 0
self.retry_task.apply([0xFF, 0xFFFF], {'max_retries': 10})
assert self.retry_task.iterations == 11
def test_retry_no_args(self):
self.retry_task_noargs.max_retries = 3
self.retry_task_noargs.iterations = 0
self.retry_task_noargs.apply(propagate=True).get()
assert self.retry_task_noargs.iterations == 4
def test_signature_from_request__passes_headers(self):
self.retry_task.push_request()
self.retry_task.request.headers = {'custom': 10.1}
sig = self.retry_task.signature_from_request()
assert sig.options['headers']['custom'] == 10.1
def test_signature_from_request__delivery_info(self):
self.retry_task.push_request()
self.retry_task.request.delivery_info = {
'exchange': 'testex',
'routing_key': 'testrk',
}
sig = self.retry_task.signature_from_request()
assert sig.options['exchange'] == 'testex'
assert sig.options['routing_key'] == 'testrk'
def test_retry_kwargs_can_be_empty(self):
self.retry_task_mockapply.push_request()
try:
with pytest.raises(Retry):
import sys
try:
sys.exc_clear()
except AttributeError:
pass
self.retry_task_mockapply.retry(args=[4, 4], kwargs=None)
finally:
self.retry_task_mockapply.pop_request()
def test_retry_not_eager(self):
self.retry_task_mockapply.push_request()
try:
self.retry_task_mockapply.request.called_directly = False
exc = Exception('baz')
try:
self.retry_task_mockapply.retry(
args=[4, 4], kwargs={'task_retries': 0},
exc=exc, throw=False,
)
assert self.retry_task_mockapply.applied
finally:
self.retry_task_mockapply.applied = 0
try:
with pytest.raises(Retry):
self.retry_task_mockapply.retry(
args=[4, 4], kwargs={'task_retries': 0},
exc=exc, throw=True)
assert self.retry_task_mockapply.applied
finally:
self.retry_task_mockapply.applied = 0
finally:
self.retry_task_mockapply.pop_request()
def test_retry_with_kwargs(self):
self.retry_task_customexc.max_retries = 3
self.retry_task_customexc.iterations = 0
self.retry_task_customexc.apply([0xFF, 0xFFFF], {'kwarg': 0xF})
assert self.retry_task_customexc.iterations == 4
def test_retry_with_custom_exception(self):
self.retry_task_customexc.max_retries = 2
self.retry_task_customexc.iterations = 0
result = self.retry_task_customexc.apply(
[0xFF, 0xFFFF], {'kwarg': 0xF},
)
with pytest.raises(MyCustomException):
result.get()
assert self.retry_task_customexc.iterations == 3
def test_max_retries_exceeded(self):
self.retry_task.max_retries = 2
self.retry_task.iterations = 0
result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False})
with pytest.raises(self.retry_task.MaxRetriesExceededError):
result.get()
assert self.retry_task.iterations == 3
self.retry_task.max_retries = 1
self.retry_task.iterations = 0
result = self.retry_task.apply([0xFF, 0xFFFF], {'care': False})
with pytest.raises(self.retry_task.MaxRetriesExceededError):
result.get()
assert self.retry_task.iterations == 2
def test_autoretry_no_kwargs(self):
self.autoretry_task_no_kwargs.max_retries = 3
self.autoretry_task_no_kwargs.iterations = 0
self.autoretry_task_no_kwargs.apply((1, 0))
assert self.autoretry_task_no_kwargs.iterations == 4
def test_autoretry(self):
self.autoretry_task.max_retries = 3
self.autoretry_task.iterations = 0
self.autoretry_task.apply((1, 0))
assert self.autoretry_task.iterations == 6
@patch('random.randrange', side_effect=lambda i: i - 1)
def test_autoretry_backoff(self, randrange):
task = self.autoretry_backoff_task
task.max_retries = 3
task.iterations = 0
with patch.object(task, 'retry', wraps=task.retry) as fake_retry:
task.apply(("http://httpbin.org/error",))
assert task.iterations == 4
retry_call_countdowns = [
call[1]['countdown'] for call in fake_retry.call_args_list
]
assert retry_call_countdowns == [1, 2, 4, 8]
@patch('random.randrange', side_effect=lambda i: i - 2)
def test_autoretry_backoff_jitter(self, randrange):
task = self.autoretry_backoff_jitter_task
task.max_retries = 3
task.iterations = 0
with patch.object(task, 'retry', wraps=task.retry) as fake_retry:
task.apply(("http://httpbin.org/error",))
assert task.iterations == 4
retry_call_countdowns = [
call[1]['countdown'] for call in fake_retry.call_args_list
]
assert retry_call_countdowns == [0, 1, 3, 7]
def test_retry_wrong_eta_when_not_enable_utc(self):
"""Issue #3753"""
self.app.conf.enable_utc = False
self.app.conf.timezone = 'US/Eastern'
self.autoretry_task.iterations = 0
self.autoretry_task.default_retry_delay = 2
self.autoretry_task.apply((1, 0))
assert self.autoretry_task.iterations == 6
class test_canvas_utils(TasksCase):
def test_si(self):
assert self.retry_task.si()
assert self.retry_task.si().immutable
def test_chunks(self):
assert self.retry_task.chunks(range(100), 10)
def test_map(self):
assert self.retry_task.map(range(100))
def test_starmap(self):
assert self.retry_task.starmap(range(100))
def test_on_success(self):
self.retry_task.on_success(1, 1, (), {})
class test_tasks(TasksCase):
def now(self):
return self.app.now()
def test_typing(self):
@self.app.task()
def add(x, y, kw=1):
pass
with pytest.raises(TypeError):
add.delay(1)
with pytest.raises(TypeError):
add.delay(1, kw=2)
with pytest.raises(TypeError):
add.delay(1, 2, foobar=3)
add.delay(2, 2)
def test_shadow_name(self):
def shadow_name(task, args, kwargs, options):
return 'fooxyz'
@self.app.task(shadow_name=shadow_name)
def shadowed():
pass
old_send_task = self.app.send_task
self.app.send_task = Mock()
shadowed.delay()
self.app.send_task.assert_called_once_with(ANY, ANY, ANY,
compression=ANY,
delivery_mode=ANY,
exchange=ANY,
expires=ANY,
immediate=ANY,
link=ANY,
link_error=ANY,
mandatory=ANY,
priority=ANY,
producer=ANY,
queue=ANY,
result_cls=ANY,
routing_key=ANY,
serializer=ANY,
soft_time_limit=ANY,
task_id=ANY,
task_type=ANY,
time_limit=ANY,
shadow='fooxyz',
ignore_result=False)
self.app.send_task = old_send_task
def test_shadow_name_old_task_class(self):
def shadow_name(task, args, kwargs, options):
return 'fooxyz'
@self.app.task(base=OldTask, shadow_name=shadow_name)
def shadowed():
pass
old_send_task = self.app.send_task
self.app.send_task = Mock()
shadowed.delay()
self.app.send_task.assert_called_once_with(ANY, ANY, ANY,
compression=ANY,
delivery_mode=ANY,
exchange=ANY,
expires=ANY,
immediate=ANY,
link=ANY,
link_error=ANY,
mandatory=ANY,
priority=ANY,
producer=ANY,
queue=ANY,
result_cls=ANY,
routing_key=ANY,
serializer=ANY,
soft_time_limit=ANY,
task_id=ANY,
task_type=ANY,
time_limit=ANY,
shadow='fooxyz',
ignore_result=False)
self.app.send_task = old_send_task
def test_typing__disabled(self):
@self.app.task(typing=False)
def add(x, y, kw=1):
pass
add.delay(1)
add.delay(1, kw=2)
add.delay(1, 2, foobar=3)
def test_typing__disabled_by_app(self):
with self.Celery(set_as_current=False, strict_typing=False) as app:
@app.task()
def add(x, y, kw=1):
pass
assert not add.typing
add.delay(1)
add.delay(1, kw=2)
add.delay(1, 2, foobar=3)
@pytest.mark.usefixtures('depends_on_current_app')
def test_unpickle_task(self):
import pickle
@self.app.task(shared=True)
def xxx():
pass
assert pickle.loads(pickle.dumps(xxx)) is xxx.app.tasks[xxx.name]
@patch('celery.app.task.current_app')
@pytest.mark.usefixtures('depends_on_current_app')
def test_bind__no_app(self, current_app):
class XTask(Task):
_app = None
XTask._app = None
XTask.__bound__ = False
XTask.bind = Mock(name='bind')
assert XTask.app is current_app
XTask.bind.assert_called_with(current_app)
def test_reprtask__no_fmt(self):
assert _reprtask(self.mytask)
def test_AsyncResult(self):
task_id = uuid()
result = self.retry_task.AsyncResult(task_id)
assert result.backend == self.retry_task.backend
assert result.id == task_id
def assert_next_task_data_equal(self, consumer, presult, task_name,
test_eta=False, test_expires=False,
properties=None, headers=None, **kwargs):
next_task = consumer.queues[0].get(accept=['pickle', 'json'])
task_properties = next_task.properties
task_headers = next_task.headers
task_body = next_task.decode()
task_args, task_kwargs, embed = task_body
assert task_headers['id'] == presult.id
assert task_headers['task'] == task_name
if test_eta:
assert isinstance(task_headers.get('eta'), string_t)
to_datetime = parse_iso8601(task_headers.get('eta'))
assert isinstance(to_datetime, datetime)
if test_expires:
assert isinstance(task_headers.get('expires'), string_t)
to_datetime = parse_iso8601(task_headers.get('expires'))
assert isinstance(to_datetime, datetime)
properties = properties or {}
for arg_name, arg_value in items(properties):
assert task_properties.get(arg_name) == arg_value
headers = headers or {}
for arg_name, arg_value in items(headers):
assert task_headers.get(arg_name) == arg_value
for arg_name, arg_value in items(kwargs):
assert task_kwargs.get(arg_name) == arg_value
def test_incomplete_task_cls(self):
class IncompleteTask(Task):
app = self.app
name = 'c.unittest.t.itask'
with pytest.raises(NotImplementedError):
IncompleteTask().run()
def test_task_kwargs_must_be_dictionary(self):
with pytest.raises(TypeError):
self.increment_counter.apply_async([], 'str')
def test_task_args_must_be_list(self):
with pytest.raises(TypeError):
self.increment_counter.apply_async('s', {})
def test_regular_task(self):
assert isinstance(self.mytask, Task)
assert self.mytask.run()
assert callable(self.mytask)
assert self.mytask(), 'Task class runs run() when called'
with self.app.connection_or_acquire() as conn:
consumer = self.app.amqp.TaskConsumer(conn)
with pytest.raises(NotImplementedError):
consumer.receive('foo', 'foo')
consumer.purge()
assert consumer.queues[0].get() is None
self.app.amqp.TaskConsumer(conn, queues=[Queue('foo')])
# Without arguments.
presult = self.mytask.delay()
self.assert_next_task_data_equal(
consumer, presult, self.mytask.name)
# With arguments.
presult2 = self.mytask.apply_async(
kwargs={'name': 'George Costanza'},
)
self.assert_next_task_data_equal(
consumer, presult2, self.mytask.name, name='George Costanza',
)
# send_task
sresult = self.app.send_task(self.mytask.name,
kwargs={'name': 'Elaine M. Benes'})
self.assert_next_task_data_equal(
consumer, sresult, self.mytask.name, name='Elaine M. Benes',
)
# With ETA.
presult2 = self.mytask.apply_async(
kwargs={'name': 'George Costanza'},
eta=self.now() + timedelta(days=1),
expires=self.now() + timedelta(days=2),
)
self.assert_next_task_data_equal(
consumer, presult2, self.mytask.name,
name='George Costanza', test_eta=True, test_expires=True,
)
# With countdown.
presult2 = self.mytask.apply_async(
kwargs={'name': 'George Costanza'}, countdown=10, expires=12,
)
self.assert_next_task_data_equal(
consumer, presult2, self.mytask.name,
name='George Costanza', test_eta=True, test_expires=True,
)
# Default argsrepr/kwargsrepr behavior
presult2 = self.mytask.apply_async(
args=('spam',), kwargs={'name': 'Jerry Seinfeld'}
)
self.assert_next_task_data_equal(
consumer, presult2, self.mytask.name,
headers={'argsrepr': "('spam',)",
'kwargsrepr': "{'name': 'Jerry Seinfeld'}"},
)
# With argsrepr/kwargsrepr
presult2 = self.mytask.apply_async(
args=('secret',), argsrepr="'***'",
kwargs={'password': 'foo'}, kwargsrepr="{'password': '***'}",
)
self.assert_next_task_data_equal(
consumer, presult2, self.mytask.name,
headers={'argsrepr': "'***'",
'kwargsrepr': "{'password': '***'}"},
)
# Discarding all tasks.
consumer.purge()
self.mytask.apply_async()
assert consumer.purge() == 1
assert consumer.queues[0].get() is None
assert not presult.successful()
self.mytask.backend.mark_as_done(presult.id, result=None)
assert presult.successful()
def test_send_event(self):
mytask = self.mytask._get_current_object()
mytask.app.events = Mock(name='events')
mytask.app.events.attach_mock(ContextMock(), 'default_dispatcher')
mytask.request.id = 'fb'
mytask.send_event('task-foo', id=3122)
mytask.app.events.default_dispatcher().send.assert_called_with(
'task-foo', uuid='fb', id=3122,
retry=True, retry_policy=self.app.conf.task_publish_retry_policy)
def test_replace(self):
sig1 = Mock(name='sig1')
sig1.options = {}
with pytest.raises(Ignore):
self.mytask.replace(sig1)
def test_replace_with_chord(self):
sig1 = Mock(name='sig1')
sig1.options = {'chord': None}
with pytest.raises(ImproperlyConfigured):
self.mytask.replace(sig1)
@pytest.mark.usefixtures('depends_on_current_app')
def test_replace_callback(self):
c = group([self.mytask.s()], app=self.app)
c.freeze = Mock(name='freeze')
c.delay = Mock(name='delay')
self.mytask.request.id = 'id'
self.mytask.request.group = 'group'
self.mytask.request.root_id = 'root_id'
self.mytask.request.callbacks = 'callbacks'
self.mytask.request.errbacks = 'errbacks'
class JsonMagicMock(MagicMock):
parent = None
def __json__(self):
return 'whatever'
def reprcall(self, *args, **kwargs):
return 'whatever2'
mocked_signature = JsonMagicMock(name='s')
accumulate_mock = JsonMagicMock(name='accumulate', s=mocked_signature)
self.mytask.app.tasks['celery.accumulate'] = accumulate_mock
try:
self.mytask.replace(c)
except Ignore:
mocked_signature.return_value.set.assert_called_with(
link='callbacks',
link_error='errbacks',
)
def test_replace_group(self):
c = group([self.mytask.s()], app=self.app)
c.freeze = Mock(name='freeze')
c.delay = Mock(name='delay')
self.mytask.request.id = 'id'
self.mytask.request.group = 'group'
        self.mytask.request.root_id = 'root_id'
with pytest.raises(Ignore):
self.mytask.replace(c)
def test_add_trail__no_trail(self):
mytask = self.increment_counter._get_current_object()
mytask.trail = False
mytask.add_trail('foo')
def test_repr_v2_compat(self):
self.mytask.__v2_compat__ = True
assert 'v2 compatible' in repr(self.mytask)
def test_context_get(self):
self.mytask.push_request()
try:
request = self.mytask.request
request.foo = 32
assert request.get('foo') == 32
assert request.get('bar', 36) == 36
request.clear()
finally:
self.mytask.pop_request()
def test_annotate(self):
with patch('celery.app.task.resolve_all_annotations') as anno:
anno.return_value = [{'FOO': 'BAR'}]
@self.app.task(shared=False)
def task():
pass
task.annotate()
assert task.FOO == 'BAR'
def test_after_return(self):
self.mytask.push_request()
try:
self.mytask.request.chord = self.mytask.s()
self.mytask.after_return('SUCCESS', 1.0, 'foobar', (), {}, None)
self.mytask.request.clear()
finally:
self.mytask.pop_request()
def test_update_state(self):
@self.app.task(shared=False)
def yyy():
pass
yyy.push_request()
try:
tid = uuid()
yyy.update_state(tid, 'FROBULATING', {'fooz': 'baaz'})
assert yyy.AsyncResult(tid).status == 'FROBULATING'
assert yyy.AsyncResult(tid).result == {'fooz': 'baaz'}
yyy.request.id = tid
yyy.update_state(state='FROBUZATING', meta={'fooz': 'baaz'})
assert yyy.AsyncResult(tid).status == 'FROBUZATING'
assert yyy.AsyncResult(tid).result == {'fooz': 'baaz'}
finally:
yyy.pop_request()
def test_repr(self):
@self.app.task(shared=False)
def task_test_repr():
pass
assert 'task_test_repr' in repr(task_test_repr)
def test_has___name__(self):
@self.app.task(shared=False)
def yyy2():
pass
assert yyy2.__name__
class test_apply_task(TasksCase):
def test_apply_throw(self):
with pytest.raises(KeyError):
self.raising.apply(throw=True)
def test_apply_with_task_eager_propagates(self):
self.app.conf.task_eager_propagates = True
with pytest.raises(KeyError):
self.raising.apply()
def test_apply_request_context_is_ok(self):
self.app.conf.task_eager_propagates = True
self.task_check_request_context.apply()
def test_apply(self):
self.increment_counter.count = 0
e = self.increment_counter.apply()
assert isinstance(e, EagerResult)
assert e.get() == 1
e = self.increment_counter.apply(args=[1])
assert e.get() == 2
e = self.increment_counter.apply(kwargs={'increment_by': 4})
assert e.get() == 6
assert e.successful()
assert e.ready()
assert repr(e).startswith('<EagerResult:')
f = self.raising.apply()
assert f.ready()
assert not f.successful()
assert f.traceback
with pytest.raises(KeyError):
f.get()
class test_apply_async(TasksCase):
def common_send_task_arguments(self):
return (ANY, ANY, ANY), dict(
compression=ANY,
delivery_mode=ANY,
exchange=ANY,
expires=ANY,
immediate=ANY,
link=ANY,
link_error=ANY,
mandatory=ANY,
priority=ANY,
producer=ANY,
queue=ANY,
result_cls=ANY,
routing_key=ANY,
serializer=ANY,
soft_time_limit=ANY,
task_id=ANY,
task_type=ANY,
time_limit=ANY,
shadow=None,
ignore_result=False
)
def test_task_with_ignored_result(self):
with patch.object(self.app, 'send_task') as send_task:
self.task_with_ignored_result.apply_async()
expected_args, expected_kwargs = self.common_send_task_arguments()
expected_kwargs['ignore_result'] = True
send_task.assert_called_once_with(
*expected_args,
**expected_kwargs
)
def test_task_with_result(self):
with patch.object(self.app, 'send_task') as send_task:
self.mytask.apply_async()
expected_args, expected_kwargs = self.common_send_task_arguments()
send_task.assert_called_once_with(
*expected_args,
**expected_kwargs
)
def test_task_with_result_ignoring_on_call(self):
with patch.object(self.app, 'send_task') as send_task:
self.mytask.apply_async(ignore_result=True)
expected_args, expected_kwargs = self.common_send_task_arguments()
expected_kwargs['ignore_result'] = True
send_task.assert_called_once_with(
*expected_args,
**expected_kwargs
)
|
{
"content_hash": "86db084160d1efc0dd13bd6eb41dbc34",
"timestamp": "",
"source": "github",
"line_count": 854,
"max_line_length": 79,
"avg_line_length": 35.040983606557376,
"alnum_prop": 0.5365079365079365,
"repo_name": "cloudera/hue",
"id": "8136ca472f93750561462527a15ee5f496fd0e70",
"size": "29925",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/celery-4.2.1/t/unit/tasks/test_tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
import xerox
from jsonapp.utils.cli_util import cli_print, do_copy_to_clipboard
from jsonapp.utils.print_util import print_with_separators_and_space
from jsonapp.utils.result_util import result_failure, result_success
from jsonapp.utils.string_util import String
script_output = 'script'
user_output = 'user'
class JSONApp(object):
def __init__(self):
pass
def run(self, clipboard=None, json_string=None, copy_to_clipboard=None, output=None, pager=False):
if clipboard:
json_string = xerox.paste()
self.format(json_string=json_string, copy_to_clipboard=True, output=output, pager=pager)
elif json_string:
self.format(json_string=json_string, copy_to_clipboard=copy_to_clipboard, output=output, pager=pager)
else:
return result_failure()
return result_success()
def format(self, json_string, copy_to_clipboard, output, pager):
if not json_string:
cli_print('No JSON string.')
return
formatted_json_string = String(json_string).to_pretty_json_string()
result = formatted_json_string
if output == user_output:
print_with_separators_and_space(result, pager=pager)
elif output == script_output:
print(result)
else:
raise NotImplementedError
if copy_to_clipboard:
do_copy_to_clipboard(result)
json_app = JSONApp()
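# A minimal usage sketch (argument values hypothetical):
#
#     json_app.run(json_string='{"a": 1}', copy_to_clipboard=False,
#                  output='user', pager=False)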
|
{
"content_hash": "40e8d12ebb2bb232d66d62020a29bf98",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 113,
"avg_line_length": 28.49019607843137,
"alnum_prop": 0.6496902959394356,
"repo_name": "eyalev/jsonapp",
"id": "2e5ccf37812fc919c64610cd0b3ff53b878d3c01",
"size": "1478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsonapp/core/jsonapp_main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "20816"
}
],
"symlink_target": ""
}
|
import os
import json
from common import *
from subprocess import Popen
from collections import namedtuple
from tempfile import NamedTemporaryFile
connection = namedtuple("connection",
"host host_port remote remote_port protocol timestamp, pid")
def ipconnections(target, **kwargs):
"""Returns a list of ip connections made by the target.
A connection is a named tuple with the following properties:
host (string), host_port (int), remote_port (string), protocol (string),
timestamp(int).
"""
if not target:
raise Exception("Invalid target for ipconnections()")
output_file = NamedTemporaryFile()
cmd = ["sudo", "/usr/sbin/dtrace", "-C"]
if "timeout" in kwargs:
cmd += ["-DANALYSIS_TIMEOUT=%d" % kwargs["timeout"]]
cmd += ["-s", path_for_script("ipconnections.d")]
cmd += ["-o", output_file.name]
if "args" in kwargs:
line = "%s %s" % (sanitize_path(target), " ".join(kwargs["args"]))
cmd += ["-c", line]
else:
cmd += ["-c", sanitize_path(target)]
# The dtrace script will take care of timeout itself, so we just launch
# it asynchronously
with open(os.devnull, "w") as f:
handler = Popen(cmd, stdout=f, stderr=f)
for entry in filelines(output_file):
if "## ipconnections.d done ##" in entry.strip():
break
yield _parse_single_entry(entry.strip())
output_file.close()
#
# Parsing implementation details
#
def _parse_single_entry(entry):
entry = entry.replace("\\0", "")
parsed = json.loads(entry)
host = parsed['host']
host_port = parsed['host_port']
remote = parsed['remote']
remote_port = parsed['remote_port']
protocol = parsed['protocol']
timestamp = parsed['timestamp']
pid = parsed['pid']
return connection(host, host_port, remote, remote_port, protocol, timestamp, pid)
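# Illustrative usage only (hypothetical target and arguments; assumes dtrace
# and sudo are available on the analysis machine):
#
#   for conn in ipconnections("/usr/bin/curl", args=["http://example.org"],
#                             timeout=30):
#       print(conn)   # namedtuple with host, ports, protocol, timestamp, pid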
|
{
"content_hash": "4ec5039d9185e07d011129dd29a68f9d",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 85,
"avg_line_length": 31.147540983606557,
"alnum_prop": 0.6321052631578947,
"repo_name": "rodionovd/cuckoo-osx-analyzer",
"id": "f63f3c49307710b0197dbc882ed2025279535d1e",
"size": "2080",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "analyzer/darwin/lib/dtrace/ipconnections.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9292"
},
{
"name": "DTrace",
"bytes": "8642"
},
{
"name": "Python",
"bytes": "87690"
},
{
"name": "Shell",
"bytes": "29789"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UltrasoundScanning.saved_ultrasound_image'
db.add_column(u'patient_ultrasoundscanning', 'saved_ultrasound_image',
self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UltrasoundScanning.saved_ultrasound_image'
db.delete_column(u'patient_ultrasoundscanning', 'saved_ultrasound_image')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.additionalpatientinformation': {
'Meta': {'object_name': 'AdditionalPatientInformation'},
'alcohol': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cigarettes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cooking_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'literate': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'other_harmful_substances': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'psychological_stress': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'toilet_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'patient.familymedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'FamilyMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.guardian': {
'Meta': {'object_name': 'Guardian'},
'contact_number': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'home_address': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'relation': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.gynaecologicalhistory': {
'Meta': {'object_name': 'GynaecologicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_last_pap_smear': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method_of_birth_control': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'previous_surgery': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PreviousSurgery']"}),
'result_pap_smear': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.immunizationhistory': {
'Meta': {'object_name': 'ImmunizationHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'tetanus_toxoid1': ('django.db.models.fields.DateTimeField', [], {}),
'tetanus_toxoid2': ('django.db.models.fields.DateTimeField', [], {}),
'tetanus_toxoid3': ('django.db.models.fields.DateTimeField', [], {}),
'vaccination': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.laboratorytest': {
'Meta': {'object_name': 'LaboratoryTest'},
'blood_group': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'hemoglobin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'serological_test_for_syphilis': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'urinalysis': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.medicalhistory': {
'Meta': {'object_name': 'MedicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'family_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.FamilyMedicalHistory']"}),
'gynaecological_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.GynaecologicalHistory']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immunization_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ImmunizationHistory']"}),
'menstrual_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.MenstrualHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ObstetricHistory']"}),
'past_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PastMedicalHistory']"}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'present_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PresentMedicalHistory']"})
},
u'patient.menstrualhistory': {
'Meta': {'object_name': 'MenstrualHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'day_of_visit': ('django.db.models.fields.DateField', [], {}),
'expected_date_of_delivery': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_menstrual_periods': ('django.db.models.fields.DateField', [], {}),
'menstrual_cycle': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'poa_by_lmp': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.obstetrichistory': {
'Meta': {'object_name': 'ObstetricHistory'},
'check_if_you_have_been_miscarriages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'check_if_you_have_been_pregnant': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_previous_obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PreviousObstetricHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.pastmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PastMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.patientinformation': {
'Meta': {'object_name': 'PatientInformation'},
'address': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'marital_status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'operator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
u'patient.presentmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PresentMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.previousobstetrichistory': {
'Meta': {'object_name': 'PreviousObstetricHistory'},
'age_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'birth_weight': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length_of_pregnancy': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'obstetrical_operation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'periods_of_exclusive_feeding': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'problems': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'types_of_delivery': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.DateField', [], {})
},
u'patient.previoussurgery': {
'Meta': {'object_name': 'PreviousSurgery'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'endometriosis': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'fibrocystic_breasts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'ovarian_cysts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'uterine_fibroids': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'patient.report': {
'Meta': {'object_name': 'Report'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diabetis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hiv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pregnancy': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.routinecheckup': {
'Meta': {'object_name': 'Routinecheckup'},
'abdominal_changes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'blood_pressure': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'chest_and_heart_auscultation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'fetal_movement': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_examiner': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'symptom_events': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'uterine_height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'vaginal_examination': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'visit': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'weight': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.signanaemia': {
'Meta': {'object_name': 'Signanaemia'},
'conjunctiva': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fingernails': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'oral_mucosa': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pale_complexion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'shortness_of_breath': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tip_of_tongue': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.ultrasoundscanning': {
'AC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'BPD': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'CRL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'FL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'HC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'Meta': {'object_name': 'UltrasoundScanning'},
'amount_of_amniotic_fluid': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'gestation_age': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_examiner': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'position_of_the_baby': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'position_of_the_placenta': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'saved_ultrasound_image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['patient']
|
{
"content_hash": "13a5c8043516213cfc5bef6cad331c85",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 187,
"avg_line_length": 83.5635838150289,
"alnum_prop": 0.5674264171825822,
"repo_name": "aazhbd/medical_info01",
"id": "7ffe776c5f5956e16072b0d54cf6c3b21cf2e9ba",
"size": "28937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "patient/migrations/0008_auto__add_field_ultrasoundscanning_saved_ultrasound_image.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "54905"
},
{
"name": "HTML",
"bytes": "139815"
},
{
"name": "JavaScript",
"bytes": "1241861"
},
{
"name": "Python",
"bytes": "1336885"
},
{
"name": "Shell",
"bytes": "156"
}
],
"symlink_target": ""
}
|
"""
Copyright 2013 Jérémie BOUTOILLE
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from struct import unpack, pack
from viper.modules.pymacho.MachOLoadCommand import MachOLoadCommand
from viper.modules.pymacho.Utils import green
class MachOUUIDCommand(MachOLoadCommand):
uuid = ()
def __init__(self, macho_file=None, cmd=0):
self.cmd = cmd
if macho_file is not None:
self.parse(macho_file)
def parse(self, macho_file):
self.uuid = unpack("<BBBBBBBBBBBBBBBB", macho_file.read(16))
def write(self, macho_file):
before = macho_file.tell()
macho_file.write(pack('<II', self.cmd, 0x0))
macho_file.write(pack('<BBBBBBBBBBBBBBBB', self.uuid[0], self.uuid[1], self.uuid[2], self.uuid[3], self.uuid[4], self.uuid[5], self.uuid[6], \
self.uuid[7], self.uuid[8], self.uuid[9], self.uuid[10], self.uuid[11], self.uuid[12], \
self.uuid[13], self.uuid[14], self.uuid[15]))
after = macho_file.tell()
macho_file.seek(before+4)
macho_file.write(pack('<I', after-before))
macho_file.seek(after)
def display(self, before=''):
print before + green("[+]")+" LC_UUID"
print before + "\t- uuid : %02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-%02X%02X%02X%02X%02X%02X" \
% (self.uuid[0], self.uuid[1], self.uuid[2], self.uuid[3], self.uuid[4], self.uuid[5], self.uuid[6], \
self.uuid[7], self.uuid[8], self.uuid[9], self.uuid[10], self.uuid[11], self.uuid[12], \
self.uuid[13], self.uuid[14], self.uuid[15])
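# Illustrative usage only (file name and offset are hypothetical; parse()
# expects the file to be positioned just past the cmd/cmdsize header):
#
#   f = open("some_binary", "rb")
#   f.seek(uuid_payload_offset)
#   uuid_cmd = MachOUUIDCommand(macho_file=f, cmd=0x1b)  # 0x1b is LC_UUID
#   uuid_cmd.display()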
|
{
"content_hash": "3168612dfc3157e056097396fdd2b05c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 150,
"avg_line_length": 42.09803921568628,
"alnum_prop": 0.6613879832324173,
"repo_name": "S2R2/viper",
"id": "8a06e6ca41cf207a121fcb51378d398105fd6029",
"size": "2168",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "viper/modules/pymacho/MachOUUIDCommand.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1306"
},
{
"name": "JavaScript",
"bytes": "9294"
},
{
"name": "Makefile",
"bytes": "436"
},
{
"name": "Python",
"bytes": "1552230"
},
{
"name": "Smarty",
"bytes": "28213"
}
],
"symlink_target": ""
}
|
"""
====================================================
Chebyshev Series (:mod:`numpy.polynomial.chebyshev`)
====================================================
This module provides a number of objects (mostly functions) useful for
dealing with Chebyshev series, including a `Chebyshev` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Classes
-------
.. autosummary::
:toctree: generated/
Chebyshev
Constants
---------
.. autosummary::
:toctree: generated/
chebdomain
chebzero
chebone
chebx
Arithmetic
----------
.. autosummary::
:toctree: generated/
chebadd
chebsub
chebmulx
chebmul
chebdiv
chebpow
chebval
chebval2d
chebval3d
chebgrid2d
chebgrid3d
Calculus
--------
.. autosummary::
:toctree: generated/
chebder
chebint
Misc Functions
--------------
.. autosummary::
:toctree: generated/
chebfromroots
chebroots
chebvander
chebvander2d
chebvander3d
chebgauss
chebweight
chebcompanion
chebfit
chebpts1
chebpts2
chebtrim
chebline
cheb2poly
poly2cheb
chebinterpolate
See also
--------
`numpy.polynomial`
Notes
-----
The implementations of multiplication, division, integration, and
differentiation use the algebraic identities [1]_:
.. math ::
T_n(x) = \\frac{z^n + z^{-n}}{2} \\\\
z\\frac{dx}{dz} = \\frac{z - z^{-1}}{2}.
where
.. math :: x = \\frac{z + z^{-1}}{2}.
These identities allow a Chebyshev series to be expressed as a finite,
symmetric Laurent series. In this module, this sort of Laurent series
is referred to as a "z-series."
References
----------
.. [1] A. T. Benjamin, et al., "Combinatorial Trigonometry with Chebyshev
Polynomials," *Journal of Statistical Planning and Inference 14*, 2008
(preprint: https://www.math.hmc.edu/~benjamin/papers/CombTrig.pdf, pg. 4)
"""
import numpy as np
import numpy.linalg as la
from numpy.core.multiarray import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'chebzero', 'chebone', 'chebx', 'chebdomain', 'chebline', 'chebadd',
'chebsub', 'chebmulx', 'chebmul', 'chebdiv', 'chebpow', 'chebval',
'chebder', 'chebint', 'cheb2poly', 'poly2cheb', 'chebfromroots',
'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1',
'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d',
'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion',
'chebgauss', 'chebweight', 'chebinterpolate']
chebtrim = pu.trimcoef
#
# A collection of functions for manipulating z-series. These are private
# functions and do minimal error checking.
#
def _cseries_to_zseries(c):
"""Covert Chebyshev series to z-series.
Covert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high
Returns
-------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
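Notes
-----
For example, the coefficients ``[a, b, c]`` map to the symmetric z-series
``[c/2, b/2, a, b/2, c/2]``.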
"""
n = c.size
zs = np.zeros(2*n-1, dtype=c.dtype)
zs[n-1:] = c/2
return zs + zs[::-1]
def _zseries_to_cseries(zs):
"""Covert z-series to a Chebyshev series.
Covert a z series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
Parameters
----------
zs : 1-D ndarray
Odd length symmetric z-series, ordered from low to high.
Returns
-------
c : 1-D ndarray
Chebyshev coefficients, ordered from low to high.
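Notes
-----
This is the inverse of `_cseries_to_zseries`; e.g. the z-series
``[c/2, b/2, a, b/2, c/2]`` maps back to ``[a, b, c]``.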
"""
n = (zs.size + 1)//2
c = zs[n-1:].copy()
c[1:n] *= 2
return c
def _zseries_mul(z1, z2):
"""Multiply two z-series.
Multiply two z-series to produce a z-series.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D but this is not checked.
Returns
-------
product : 1-D ndarray
The product z-series.
Notes
-----
This is simply convolution. If symmetric/anti-symmetric z-series are
denoted by S/A then the following rules apply:
S*S, A*A -> S
S*A, A*S -> A
"""
return np.convolve(z1, z2)
def _zseries_div(z1, z2):
"""Divide the first z-series by the second.
Divide `z1` by `z2` and return the quotient and remainder as z-series.
Warning: this implementation only applies when both z1 and z2 have the
same symmetry, which is sufficient for present purposes.
Parameters
----------
z1, z2 : 1-D ndarray
The arrays must be 1-D and have the same symmetry, but this is not
checked.
Returns
-------
(quotient, remainder) : 1-D ndarrays
Quotient and remainder as z-series.
Notes
-----
This is not the same as polynomial division on account of the desired form
of the remainder. If symmetric/anti-symmetric z-series are denoted by S/A
then the following rules apply:
S/S -> S,S
A/A -> S,A
The restriction to types of the same symmetry could be fixed but seems like
unneeded generality. There is no natural form for the remainder in the case
where there is no symmetry.
"""
z1 = z1.copy()
z2 = z2.copy()
lc1 = len(z1)
lc2 = len(z2)
if lc2 == 1:
z1 /= z2
return z1, z1[:1]*0
elif lc1 < lc2:
return z1[:1]*0, z1
else:
dlen = lc1 - lc2
scl = z2[0]
z2 /= scl
quo = np.empty(dlen + 1, dtype=z1.dtype)
i = 0
j = dlen
while i < j:
r = z1[i]
quo[i] = z1[i]
quo[dlen - i] = r
tmp = r*z2
z1[i:i+lc2] -= tmp
z1[j:j+lc2] -= tmp
i += 1
j -= 1
r = z1[i]
quo[i] = r
tmp = r*z2
z1[i:i+lc2] -= tmp
quo /= scl
rem = z1[i+1:i-1+lc2].copy()
return quo, rem
def _zseries_der(zs):
"""Differentiate a z-series.
The derivative is with respect to x, not z. This is achieved using the
chain rule and the value of dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to differentiate.
Returns
-------
derivative : z-series
The derivative
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
multiplying the value of zs by two also so that the two cancels in the
division.
"""
n = len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs *= np.arange(-n, n+1)*2
d, r = _zseries_div(zs, ns)
return d
def _zseries_int(zs):
"""Integrate a z-series.
The integral is with respect to x, not z. This is achieved by a change
of variable using dx/dz given in the module notes.
Parameters
----------
zs : z-series
The z-series to integrate
Returns
-------
integral : z-series
The indefinite integral
Notes
-----
The zseries for x (ns) has been multiplied by two in order to avoid
using floats that are incompatible with Decimal and likely other
specialized scalar types. This scaling has been compensated by
dividing the resulting zs by two.
"""
n = 1 + len(zs)//2
ns = np.array([-1, 0, 1], dtype=zs.dtype)
zs = _zseries_mul(zs, ns)
div = np.arange(-n, n+1)*2
zs[:n] /= div[:n]
zs[n+1:] /= div[n+1:]
zs[n] = 0
return zs
#
# Chebyshev series functions
#
def poly2cheb(pol):
"""
Convert a polynomial to a Chebyshev series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Chebyshev series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Chebyshev
series.
See Also
--------
cheb2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(range(4))
>>> p
Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
>>> c = p.convert(kind=P.Chebyshev)
>>> c
Chebyshev([1. , 3.25, 1. , 0.75], domain=[-1., 1.], window=[-1., 1.])
>>> P.chebyshev.poly2cheb(range(4))
array([1. , 3.25, 1. , 0.75])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = chebadd(chebmulx(res), pol[i])
return res
def cheb2poly(c):
"""
Convert a Chebyshev series to a polynomial.
Convert an array representing the coefficients of a Chebyshev series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Chebyshev series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2cheb
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> c = P.Chebyshev(range(4))
>>> c
Chebyshev([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([-2., -8., 4., 12.], domain=[-1., 1.], window=[-1., 1.])
>>> P.chebyshev.cheb2poly(range(4))
array([-2., -8., 4., 12.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n < 3:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], c1)
c1 = polyadd(tmp, polymulx(c1)*2)
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Chebyshev default domain.
chebdomain = np.array([-1, 1])
# Chebyshev coefficients representing zero.
chebzero = np.array([0])
# Chebyshev coefficients representing one.
chebone = np.array([1])
# Chebyshev coefficients representing the identity x.
chebx = np.array([0, 1])
def chebline(off, scl):
"""
Chebyshev series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Chebyshev series for
``off + scl*x``.
See Also
--------
polyline
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebline(3,2)
array([3, 2])
>>> C.chebval(-3, C.chebline(3,2)) # should be -3
-3.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def chebfromroots(roots):
"""
Generate a Chebyshev series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Chebyshev form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Chebyshev form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, hermfromroots, hermefromroots
Examples
--------
>>> import numpy.polynomial.chebyshev as C
>>> C.chebfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.25, 0. , 0.25])
>>> j = complex(0,1)
>>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([1.5+0.j, 0. +0.j, 0.5+0.j])
"""
return pu._fromroots(chebline, chebmul, roots)
def chebadd(c1, c2):
"""
Add one Chebyshev series to another.
Returns the sum of two Chebyshev series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Chebyshev series of their sum.
See Also
--------
chebsub, chebmulx, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Chebyshev series
is a Chebyshev series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebadd(c1,c2)
array([4., 4., 4.])
"""
return pu._add(c1, c2)
def chebsub(c1, c2):
"""
Subtract one Chebyshev series from another.
Returns the difference of two Chebyshev series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their difference.
See Also
--------
chebadd, chebmulx, chebmul, chebdiv, chebpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Chebyshev
series is a Chebyshev series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebsub(c1,c2)
array([-2., 0., 2.])
>>> C.chebsub(c2,c1) # -C.chebsub(c1,c2)
array([ 2., 0., -2.])
"""
return pu._sub(c1, c2)
def chebmulx(c):
"""Multiply a Chebyshev series by x.
Multiply the polynomial `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
.. versionadded:: 1.5.0
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> C.chebmulx([1,2,3])
array([1. , 2.5, 1. , 1.5])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
if len(c) > 1:
tmp = c[1:]/2
prd[2:] = tmp
prd[0:-2] += tmp
return prd
def chebmul(c1, c2):
"""
Multiply one Chebyshev series by another.
Returns the product of two Chebyshev series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Chebyshev series coefficients representing their product.
See Also
--------
chebadd, chebsub, chebmulx, chebdiv, chebpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Chebyshev polynomial basis set. Thus, to express
the product as a C-series, it is typically necessary to "reproject"
the product onto said basis set, which typically produces
"unintuitive live" (but correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebmul(c1,c2) # multiplication requires "reprojection"
array([ 6.5, 12. , 12. , 4. , 1.5])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
prd = _zseries_mul(z1, z2)
ret = _zseries_to_cseries(prd)
return pu.trimseq(ret)
def chebdiv(c1, c2):
"""
Divide one Chebyshev series by another.
Returns the quotient-with-remainder of two Chebyshev series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``T_0 + 2*T_1 + 3*T_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Chebyshev series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Chebyshev series coefficients representing the quotient and
remainder.
See Also
--------
chebadd, chebsub, chebmulx, chebmul, chebpow
Notes
-----
In general, the (polynomial) division of one C-series by another
results in quotient and remainder terms that are not in the Chebyshev
polynomial basis set. Thus, to express these results as C-series, it
is typically necessary to "reproject" the results onto said basis
set, which typically produces "unintuitive" (but correct) results;
see Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
(array([3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> C.chebdiv(c2,c1) # neither "intuitive"
(array([0., 2.]), array([-2., -4.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
# note: this is more efficient than `pu._div(chebmul, c1, c2)`
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
z1 = _cseries_to_zseries(c1)
z2 = _cseries_to_zseries(c2)
quo, rem = _zseries_div(z1, z2)
quo = pu.trimseq(_zseries_to_cseries(quo))
rem = pu.trimseq(_zseries_to_cseries(rem))
return quo, rem
def chebpow(c, pow, maxpower=16):
"""Raise a Chebyshev series to a power.
Returns the Chebyshev series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``T_0 + 2*T_1 + 3*T_2.``
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Chebyshev series of power.
See Also
--------
chebadd, chebsub, chebmulx, chebmul, chebdiv
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> C.chebpow([1, 2, 3, 4], 2)
array([15.5, 22. , 16. , ..., 12.5, 12. , 8. ])
"""
# note: this is more efficient than `pu._pow(chebmul, c1, c2)`, as it
# avoids converting between z and c series repeatedly
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
zs = _cseries_to_zseries(c)
prd = zs
for i in range(2, power + 1):
prd = np.convolve(prd, zs)
return _zseries_to_cseries(prd)
def chebder(c, m=1, scl=1, axis=0):
"""
Differentiate a Chebyshev series.
Returns the Chebyshev series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*T_0 + 2*T_1 + 3*T_2``
while [[1,2],[1,2]] represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) +
2*T_0(x)*T_1(y) + 2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Chebyshev series of the derivative.
See Also
--------
chebint
Notes
-----
In general, the result of differentiating a C-series needs to be
"reprojected" onto the C-series basis set. Thus, typically, the
result of this function is "unintuitive," albeit correct; see Examples
section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3,4)
>>> C.chebder(c)
array([14., 12., 24.])
>>> C.chebder(c,3)
array([96.])
>>> C.chebder(c,scl=-1)
array([-14., -12., -24.])
>>> C.chebder(c,2,-1)
array([12., 96.])
"""
c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt = pu._deprecate_as_int(m, "the order of derivation")
iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2*j)*c[j]
c[j - 2] += (j*c[j])/(j - 2)
if n > 1:
der[1] = 4*c[2]
der[0] = c[1]
c = der
c = np.moveaxis(c, 0, iaxis)
return c
def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Chebyshev series.
Returns the Chebyshev series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``T_0 + 2*T_1 + 3*T_2`` while [[1,2],[1,2]]
represents ``1*T_0(x)*T_0(y) + 1*T_1(x)*T_0(y) + 2*T_0(x)*T_1(y) +
2*T_1(x)*T_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Chebyshev series coefficients. If c is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at zero
is the first value in the list, the value of the second integral
at zero is the second value, etc. If ``k == []`` (the default),
all constants are set to zero. If ``m == 1``, a single scalar can
be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
C-series coefficients of the integral.
Raises
------
ValueError
If ``m < 1``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
``np.ndim(scl) != 0``.
See Also
--------
chebder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a`- perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3)
>>> C.chebint(c)
array([ 0.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,3)
array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, # may vary
0.00625 ])
>>> C.chebint(c, k=3)
array([ 3.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,lbnd=-2)
array([ 8.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,scl=-2)
array([-1., 1., -1., -1.])
"""
c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt = pu._deprecate_as_int(m, "the order of integration")
iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if np.ndim(lbnd) != 0:
raise ValueError("lbnd must be a scalar.")
if np.ndim(scl) != 0:
raise ValueError("scl must be a scalar.")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
return c
c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
if n > 1:
tmp[2] = c[1]/4
for j in range(2, n):
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[j - 1] -= c[j]/(2*(j - 1))
tmp[0] += k[i] - chebval(lbnd, tmp)
c = tmp
c = np.moveaxis(c, 0, iaxis)
return c
def chebval(x, c, tensor=True):
"""
Evaluate a Chebyshev series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * T_0(x) + c_1 * T_1(x) + ... + c_n * T_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape ``()``.
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
chebval2d, chebgrid2d, chebval3d, chebgrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
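    A quick illustration (printed values may vary slightly with NumPy
    version and platform):
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebval(2., (1, 2, 3))  # 1*T_0(2) + 2*T_1(2) + 3*T_2(2)
    26.0
    >>> C.chebval(2., [[1, 2], [3, 4]])  # each column is a separate series
    array([  7.,  10.])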
"""
c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
x2 = 2*x
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
c0 = c[-i] - c1
c1 = tmp + c1*x2
return c0 + c1*x
def chebval2d(x, y, c):
"""
Evaluate a 2-D Chebyshev series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * T_i(x) * T_j(y)
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than 2 the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Chebyshev series at points formed
from pairs of corresponding values from `x` and `y`.
See Also
--------
chebval, chebgrid2d, chebval3d, chebgrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
return pu._valnd(chebval, c, x, y)
def chebgrid2d(x, y, c):
"""
Evaluate a 2-D Chebyshev series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * T_i(a) * T_j(b),
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Chebyshev series at points in the
Cartesian product of `x` and `y`.
See Also
--------
chebval, chebval2d, chebval3d, chebgrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
return pu._gridnd(chebval, c, x, y)
def chebval3d(x, y, z, c):
"""
Evaluate a 3-D Chebyshev series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * T_i(x) * T_j(y) * T_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
chebval, chebval2d, chebgrid2d, chebgrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
return pu._valnd(chebval, c, x, y, z)
def chebgrid3d(x, y, z, c):
"""
Evaluate a 3-D Chebyshev series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * T_i(a) * T_j(b) * T_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional Chebyshev series at points in
        the Cartesian product of `x`, `y`, and `z`.
See Also
--------
chebval, chebval2d, chebgrid2d, chebval3d
Notes
-----
.. versionadded:: 1.7.0
"""
return pu._gridnd(chebval, c, x, y, z)
def chebvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = T_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Chebyshev polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
matrix ``V = chebvander(x, n)``, then ``np.dot(V, c)`` and
``chebval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Chebyshev series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Chebyshev polynomial. The dtype will be the same as
the converted `x`.
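    Examples
    --------
    A small illustration; the three rows correspond to x = -1, 0, 1 and the
    columns to T_0, T_1, T_2 (output formatting may vary):
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebvander(np.array([-1., 0., 1.]), 2)
    array([[ 1., -1.,  1.],
           [ 1.,  0., -1.],
           [ 1.,  1.,  1.]])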
"""
ideg = pu._deprecate_as_int(deg, "deg")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries.
v[0] = x*0 + 1
if ideg > 0:
x2 = 2*x
v[1] = x
for i in range(2, ideg + 1):
v[i] = v[i-1]*x2 - v[i-2]
return np.moveaxis(v, 0, -1)
def chebvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (deg[1] + 1)*i + j] = T_i(x) * T_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Chebyshev polynomials.
If ``V = chebvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``chebval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Chebyshev
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
chebvander, chebvander3d, chebval2d, chebval3d
Notes
-----
.. versionadded:: 1.7.0
"""
return pu._vander_nd_flat((chebvander, chebvander), (x, y), deg)
def chebvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = T_i(x)*T_j(y)*T_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Chebyshev polynomials.
If ``V = chebvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``chebval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Chebyshev
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    chebvander, chebvander2d, chebval2d, chebval3d
Notes
-----
.. versionadded:: 1.7.0
"""
return pu._vander_nd_flat((chebvander, chebvander, chebvander), (x, y, z), deg)
def chebfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Chebyshev series to data.
Return the coefficients of a Chebyshev series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * T_1(x) + ... + c_n * T_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer,
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Chebyshev coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyfit, legfit, lagfit, hermfit, hermefit
chebval : Evaluates a Chebyshev series.
chebvander : Vandermonde matrix of Chebyshev series.
chebweight : Chebyshev weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Chebyshev series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
    the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Chebyshev series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
https://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
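    A minimal sketch on noise-free data; the sample points are illustrative
    and the printed coefficients may vary slightly due to roundoff:
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> x = np.linspace(-1, 1, 51)
    >>> y = 1 - x + x**2  # equals 1.5*T_0(x) - T_1(x) + 0.5*T_2(x)
    >>> C.chebfit(x, y, 2)  # may vary
    array([ 1.5, -1. ,  0.5])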
"""
return pu._fit(chebvander, x, y, deg, rcond, full, w)
def chebcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is a Chebyshev basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Chebyshev series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded:: 1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.array([1.] + [np.sqrt(.5)]*(n-1))
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[0] = np.sqrt(.5)
top[1:] = 1/2
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
return mat
def chebroots(c):
"""
Compute the roots of a Chebyshev series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * T_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Chebyshev series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.chebyshev as cheb
>>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) # may vary
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
# rotated companion matrix reduces error
m = chebcompanion(c)[::-1,::-1]
r = la.eigvals(m)
r.sort()
return r
def chebinterpolate(func, deg, args=()):
"""Interpolate a function at the Chebyshev points of the first kind.
Returns the Chebyshev series that interpolates `func` at the Chebyshev
points of the first kind in the interval [-1, 1]. The interpolating
    series tends to a minimax approximation to `func` with increasing `deg`
if the function is continuous in the interval.
.. versionadded:: 1.14.0
Parameters
----------
func : function
The function to be approximated. It must be a function of a single
variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
extra arguments passed in the `args` parameter.
deg : int
Degree of the interpolating polynomial
args : tuple, optional
Extra arguments to be used in the function call. Default is no extra
arguments.
Returns
-------
coef : ndarray, shape (deg + 1,)
Chebyshev coefficients of the interpolating series ordered from low to
high.
Examples
--------
>>> import numpy.polynomial.chebyshev as C
    >>> C.chebinterpolate(lambda x: np.tanh(x) + 0.5, 8)
array([ 5.00000000e-01, 8.11675684e-01, -9.86864911e-17,
-5.42457905e-02, -2.71387850e-16, 4.51658839e-03,
2.46716228e-17, -3.79694221e-04, -3.26899002e-16])
Notes
-----
The Chebyshev polynomials used in the interpolation are orthogonal when
sampled at the Chebyshev points of the first kind. If it is desired to
constrain some of the coefficients they can simply be set to the desired
value after the interpolation, no new interpolation or fit is needed. This
    is especially useful if it is known a priori that some of the coefficients
    are zero. For instance, if the function is even then the coefficients of the
terms of odd degree in the result can be set to zero.
"""
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int")
if deg < 0:
raise ValueError("expected deg >= 0")
order = deg + 1
xcheb = chebpts1(order)
yfunc = func(xcheb, *args)
m = chebvander(xcheb, deg)
c = np.dot(m.T, yfunc)
c[0] /= order
c[1:] /= 0.5*order
return c
def chebgauss(deg):
"""
Gauss-Chebyshev quadrature.
Computes the sample points and weights for Gauss-Chebyshev quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1/\\sqrt{1 - x^2}`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. For Gauss-Chebyshev there are closed form solutions for
the sample points and weights. If n = `deg`, then
.. math:: x_i = \\cos(\\pi (2 i - 1) / (2 n))
.. math:: w_i = \\pi / n
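    Examples
    --------
    The two-point rule, shown for illustration (printed values are rounded
    and may vary):
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebgauss(2)  # may vary
    (array([ 0.70710678, -0.70710678]), array([ 1.57079633,  1.57079633]))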
"""
ideg = pu._deprecate_as_int(deg, "deg")
if ideg <= 0:
raise ValueError("deg must be a positive integer")
x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg))
w = np.ones(ideg)*(np.pi/ideg)
return x, w
def chebweight(x):
"""
The weight function of the Chebyshev polynomials.
The weight function is :math:`1/\\sqrt{1 - x^2}` and the interval of
integration is :math:`[-1, 1]`. The Chebyshev polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded:: 1.7.0
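    Examples
    --------
    Illustrative values; the printed precision may vary:
    >>> import numpy as np
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebweight(np.array([0., 0.5]))  # may vary
    array([ 1.        ,  1.15470054])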
"""
w = 1./(np.sqrt(1. + x) * np.sqrt(1. - x))
return w
def chebpts1(npts):
"""
Chebyshev points of the first kind.
The Chebyshev points of the first kind are the points ``cos(x)``,
where ``x = [pi*(k + .5)/npts for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the first kind.
See Also
--------
chebpts2
Notes
-----
.. versionadded:: 1.5.0
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 1:
raise ValueError("npts must be >= 1")
x = np.linspace(-np.pi, 0, _npts, endpoint=False) + np.pi/(2*_npts)
return np.cos(x)
def chebpts2(npts):
"""
Chebyshev points of the second kind.
The Chebyshev points of the second kind are the points ``cos(x)``,
where ``x = [pi*k/(npts - 1) for k in range(npts)]``.
Parameters
----------
npts : int
Number of sample points desired.
Returns
-------
pts : ndarray
The Chebyshev points of the second kind.
Notes
-----
.. versionadded:: 1.5.0
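    Examples
    --------
    Four points of the second kind; the exact values are -1, -1/2, 1/2, 1
    (printed output may vary):
    >>> from numpy.polynomial import chebyshev as C
    >>> C.chebpts2(4)  # may vary
    array([-1. , -0.5,  0.5,  1. ])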
"""
_npts = int(npts)
if _npts != npts:
raise ValueError("npts must be integer")
if _npts < 2:
raise ValueError("npts must be >= 2")
x = np.linspace(-np.pi, 0, _npts)
return np.cos(x)
#
# Chebyshev series class
#
class Chebyshev(ABCPolyBase):
"""A Chebyshev series class.
The Chebyshev class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
methods listed below.
Parameters
----------
coef : array_like
Chebyshev coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(chebadd)
_sub = staticmethod(chebsub)
_mul = staticmethod(chebmul)
_div = staticmethod(chebdiv)
_pow = staticmethod(chebpow)
_val = staticmethod(chebval)
_int = staticmethod(chebint)
_der = staticmethod(chebder)
_fit = staticmethod(chebfit)
_line = staticmethod(chebline)
_roots = staticmethod(chebroots)
_fromroots = staticmethod(chebfromroots)
@classmethod
def interpolate(cls, func, deg, domain=None, args=()):
"""Interpolate a function at the Chebyshev points of the first kind.
Returns the series that interpolates `func` at the Chebyshev points of
the first kind scaled and shifted to the `domain`. The resulting series
        tends to a minimax approximation of `func` when the function is
continuous in the domain.
.. versionadded:: 1.14.0
Parameters
----------
func : function
The function to be interpolated. It must be a function of a single
variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
extra arguments passed in the `args` parameter.
deg : int
Degree of the interpolating polynomial.
domain : {None, [beg, end]}, optional
Domain over which `func` is interpolated. The default is None, in
which case the domain is [-1, 1].
args : tuple, optional
Extra arguments to be used in the function call. Default is no
extra arguments.
Returns
-------
polynomial : Chebyshev instance
Interpolating Chebyshev instance.
Notes
-----
        See `numpy.polynomial.chebyshev.chebinterpolate` for more details.
"""
if domain is None:
domain = cls.domain
xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args)
coef = chebinterpolate(xfunc, deg)
return cls(coef, domain=domain)
# Virtual properties
nickname = 'cheb'
domain = np.array(chebdomain)
window = np.array(chebdomain)
basis_name = 'T'
|
{
"content_hash": "86319d7a6e9c20750b49eda520c7346d",
"timestamp": "",
"source": "github",
"line_count": 2064,
"max_line_length": 86,
"avg_line_length": 29.93217054263566,
"alnum_prop": 0.6012787309809,
"repo_name": "WarrenWeckesser/numpy",
"id": "1329ba07d9ea5e24153156c401c5cc1a31db3b41",
"size": "61780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/polynomial/chebyshev.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9059444"
},
{
"name": "C++",
"bytes": "174989"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8313055"
},
{
"name": "Shell",
"bytes": "9612"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
}
|
import requests
import json
class WebInterface:
'''
    Similar to the REST API web application, but implemented with direct
    HTTP requests so the interface can be exercised for test coverage.
'''
def __init__(self):
self.url = 'http://127.0.0.1:8080/tsdb/'
def insert_ts(self, pk, ts):
"""
Insert a timeseries into the database by sending a request to the server.
Parameters
----------
        pk: int
            a unique identifier for the timeseries
        ts: TimeSeries
            the timeseries object to be inserted into the database
"""
if hasattr(ts, 'to_json'):
ts = ts.to_json()
payload = {'pk': pk, 'ts': ts}
return self.request_post('insert_ts', payload)
def delete_ts(self, pk):
"""
Delete a timeseries from the database by sending a request to the server.
Parameters
----------
        pk: int
a unique identifier for the timeseries
"""
msg = {'pk': pk}
return self.request_post('delete_ts', msg)
def upsert_meta(self, pk, md):
"""
        Upsert metadata into the timeseries in the database designated by the primary key by sending the server a request.
Parameters
----------
        pk: int
            a unique identifier for the timeseries
        md: dict
            the metadata to be upserted into the timeseries
"""
payload = {'pk': pk, 'md': md}
return self.request_post('add_metadata', payload)
def select(self, md={}, fields=None, additional=None):
"""
        Select timeseries elements in the database that match the criteria
        given in `md` and return the corresponding fields, optionally with
        additional processing applied.
Parameters
----------
        md: dict
the selection criteria (filters)
(Options : 'blarg', 'order')
fields: dict
If not `None`, only these fields of the timeseries are returned.
Otherwise, the timeseries are returned.
additional: dict
additional computation to perform on the query matches before they're
returned. Currently provide "sort_by" and "limit" functionality
"""
payload = {'md': md, 'fields': fields, 'additional': additional}
return self.request_get('select', payload)
def augmented_select(self, proc, target, arg=None, md={}, additional=None):
"""
Parameters
----------
proc : enum
which of the modules in procs,
or name of module in procs with coroutine main.
(Options: 'corr', 'junk', 'stats')
target : array of fieldnames
will be mapped to the array of results from the coroutine.
If the target is None rather than a list of fields, we'll assume no upserting
arg : additional argument
(ex : Timeseries object)
        md : dict
            store info for the TimeSeries other than the TimeSeries object itself
            (ex: the vantage point is md['ts-14']['vp'])
additional : dict
(Options: {"sort_by":"-order"})
Returns
-------
tsdb status & payload
"""
if hasattr(arg, 'to_json'):
arg = arg.to_json()
payload = {'proc': proc, 'target': target, 'arg': arg, 'md': md, 'additional': additional}
return self.request_get('augmented_select', payload)
def add_trigger(self, proc, onwhat, target, arg=None):
"""
Send the server a request to add a trigger.
Parameters
----------
`proc` : enum
which of the modules in procs,
or name of module in procs with coroutine main.
(Options: 'corr', 'junk', 'stats')
`onwhat` :
which op is this trigger running on
(ex : "insert_ts")
`target` : array of fieldnames
will be mapped to the array of results from the coroutine.
If the target is None rather than a list of fields, we'll assume no upserting
`arg` :
additional argument
(ex : Timeseries object)
"""
payload = {'proc': proc, 'onwhat': onwhat, 'target': target, 'arg': arg}
return self.request_post('add_trigger', payload)
def remove_trigger(self, proc, onwhat, target=None):
payload = {'proc': proc, 'onwhat': onwhat, 'target': target}
return self.request_post('remove_trigger', payload)
def request_get(self, handler, payload):
try:
r = requests.get(self.url + handler, data={'query':json.dumps(payload)})
return r.status_code
        except requests.exceptions.RequestException:
            # Return a readable error message rather than raising from the handler
            return 'Could not complete the GET request'
def request_post(self, handler, payload):
try:
r = requests.post(self.url + handler, data=json.dumps(payload))
return r.status_code
        except requests.exceptions.RequestException:
            # Return a readable error message rather than raising from the handler
            return 'Could not complete the POST request'
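# Minimal usage sketch (illustrative only): it assumes a TSDB server is
# listening at the URL configured above and that the timeseries payload is
# already JSON-serialisable.
#
#   web = WebInterface()
#   web.insert_ts('ts-1', {'times': [0, 1, 2], 'values': [1.0, 2.0, 3.0]})
#   web.upsert_meta('ts-1', {'blarg': 1})
#   web.select(md={'blarg': 1}, fields=[], additional={'sort_by': '-order'})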
|
{
"content_hash": "c1c183ae6756036b04a6e4174e464aa8",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 125,
"avg_line_length": 31.67283950617284,
"alnum_prop": 0.5648021828103683,
"repo_name": "cs207-project/TimeSeries",
"id": "fe6a941323f55d3a560789fa4778e1a229f628d3",
"size": "5131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/web_for_coverage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "351769"
},
{
"name": "C++",
"bytes": "125053"
},
{
"name": "Jupyter Notebook",
"bytes": "145413"
},
{
"name": "Python",
"bytes": "261946"
},
{
"name": "Shell",
"bytes": "1437"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
# see LICENSES directory for copyright and license
import os
import sys
import logging
import httplib2
import apiclient.discovery as gapi
import gflags
import oauth2client.file as auth_file
import oauth2client.client as oauth
import oauth2client.tools as tools
OOB_CALLBACK_URN = oauth.OOB_CALLBACK_URN
class AuthenticationConfigError(ValueError):
pass
FLOWS = {}
FLAGS = gflags.FLAGS
DEFAULT_SECRETS = os.path.join(
os.path.dirname(__file__), 'client_secrets.json')
DEFAULT_SCOPE = 'https://www.googleapis.com/auth/analytics.readonly'
DEFAULT_TOKEN_FILE = os.path.join(os.path.dirname(__file__), 'analytics.dat')
MISSING_CLIENT_MSG = """
WARNING: Please configure OAuth 2.0
You need to populate the client_secrets.json file found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
"""
DOC_URL = ('https://developers.google.com/api-client-library/python/guide/'
'aaa_client_secrets')
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
# Name of file that will store the access and refresh tokens to access
# the API without having to login each time. Make sure this file is in
# a secure place.
def process_flags(flags=None):
"""Uses the command-line flags to set the logging level.
Args:
      flags: List of command-line flags passed to the Python script.
"""
if flags is None:
flags = []
# Let the gflags module process the command-line arguments.
try:
FLAGS(flags)
except gflags.FlagsError as e:
print('%s\nUsage: %s ARGS\n%s' % (e, str(flags), FLAGS))
sys.exit(1)
# Set the logging according to the command-line flag.
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
def get_flow(secret, scope, redirect):
"""
Retrieve an authentication flow object based on the given
configuration in the secret file name, the authentication scope,
and a redirect URN
"""
key = (secret, scope, redirect)
flow = FLOWS.get(key, None)
if flow is None:
msg = MISSING_CLIENT_MSG % secret
if not os.path.exists(secret):
raise AuthenticationConfigError(msg)
flow = oauth.flow_from_clientsecrets(secret, scope,
redirect_uri=redirect,
message=msg)
FLOWS[key] = flow
return flow
def make_token_store(fpath=None):
"""create token storage from give file name"""
if fpath is None:
fpath = DEFAULT_TOKEN_FILE
return auth_file.Storage(fpath)
def authenticate(flow, storage=None):
"""
    Try to retrieve a valid set of credentials from the token store if possible.
    Otherwise use the given authentication flow to obtain new credentials
    and return an authenticated http object.
Parameters
----------
flow : authentication workflow
storage: token storage, default None
"""
http = httplib2.Http()
# Prepare credentials, and authorize HTTP object with them.
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = tools.run(flow, storage)
http = credentials.authorize(http)
return http
def init_service(http):
"""
Use the given http object to build the analytics service object
"""
return gapi.build('analytics', 'v3', http=http)
def reset_default_token_store():
os.remove(DEFAULT_TOKEN_FILE)
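# Hedged end-to-end sketch: it assumes a populated client_secrets.json and
# network access; the names used are the module defaults defined above.
#
#   flow = get_flow(DEFAULT_SECRETS, DEFAULT_SCOPE, OOB_CALLBACK_URN)
#   storage = make_token_store(DEFAULT_TOKEN_FILE)
#   http = authenticate(flow, storage)
#   service = init_service(http)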
|
{
"content_hash": "79c39fe877eac19092b8232dc411a208",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 79,
"avg_line_length": 28.896,
"alnum_prop": 0.6697120708748616,
"repo_name": "BigDataforYou/movie_recommendation_workshop_1",
"id": "b20b7c8ff1b04ae5f32e25a0c23f2cd48fe93e39",
"size": "3612",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/io/auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "738713"
},
{
"name": "C++",
"bytes": "169366"
},
{
"name": "CSS",
"bytes": "14786"
},
{
"name": "Fortran",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "1408733"
},
{
"name": "JavaScript",
"bytes": "13700"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "19755294"
},
{
"name": "Shell",
"bytes": "3276"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
"""models URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from . import views_model
from . import views_auth
indexpage = [
url(r'^$', views_model.index),
]
course = [
url(r'^course/detail/(?P<sisid>[a-zA-Z0-9]+)/$', views_model.course_detail, name='course_detail'),
url(r'^course/create/$', views_model.course_create, name='course_create'),
url(r'^course/delete/$', views_model.course_delete, name='course_delete'),
url(r'^course/all/$', views_model.course_all, name='course_all'),
]
instructor = [
    url(r'^instructor/all/$', views_model.instructor_all, name='instructor_all'),
url(r'^instructor/detail/(?P<compid>[a-zA-Z0-9]+)/$', views_model.instructor_detail, name='instructor_detail'),
url(r'^instructor/create/$', views_model.instructor_create, name='instructor_create'),
url(r'^instructor/delete/$', views_model.instructor_delete, name='instructor_delete'),
]
student = [
url(r'^student/all/$', views_model.student_all, name='student_all'),
url(r'^student/detail/(?P<compid>[a-zA-Z0-9]+)/$', views_model.student_detail, name='student_detail'),
url(r'^student/create/$', views_model.student_create, name='student_create'),
url(r'^student/delete/$', views_model.student_delete, name='student_delete'),
]
enrollment = [
url(r'^enrollment/detail/(?P<enrid>[0-9]+)/$', views_model.enrollment_detail, name='enrollment_detail'),
url(r'^enrollment/create/$', views_model.enrollment_create, name='enrollment_create'),
url(r'^enrollment/delete/$', views_model.enrollment_delete, name='enrollment_delete'),
url(r'^enrollment/all/$', views_model.enrollment_all, name='enrollment_all'),
]
auth = [
url(r'^auth/login/$', views_auth.login),
url(r'^auth/validate/$', views_auth.validate),
url(r'^auth/logout/$', views_auth.logout),
url(r'^auth/record_co-view/$', views_auth.record_coview),
]
urlpatterns = indexpage + course + instructor + student + enrollment + auth
|
{
"content_hash": "976e6f56dc2afc81a82cde3ca21d2098",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 115,
"avg_line_length": 41.442622950819676,
"alnum_prop": 0.6807753164556962,
"repo_name": "Charleo85/SIS-Rebuild",
"id": "a3edea74c96d941111d17f8eed8f84eee214dd5c",
"size": "2528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/api/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "316744"
},
{
"name": "HTML",
"bytes": "69360"
},
{
"name": "JavaScript",
"bytes": "199092"
},
{
"name": "Python",
"bytes": "223768"
},
{
"name": "Shell",
"bytes": "1252"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Beacon',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('macAddr', models.CharField(max_length=20)),
('uuid', models.UUIDField(editable=False)),
('major', models.CharField(max_length=10)),
('minor', models.CharField(max_length=10)),
],
),
migrations.CreateModel(
name='BeaconLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('time', models.DateTimeField()),
('rssi', models.IntegerField()),
('measurePower', models.IntegerField()),
('beacon', models.ForeignKey(to='beacon.Beacon')),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('time', models.DateTimeField()),
('event', models.TextField()),
],
),
]
|
{
"content_hash": "21e76bd062a1805a415170a9093b7b85",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 114,
"avg_line_length": 35.1,
"alnum_prop": 0.5220797720797721,
"repo_name": "SmartcitySantiagoChile/onlineGPS",
"id": "f41e7d59b5b2c437ea1866327f2daaa13577efd8",
"size": "1428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beacon/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "160713"
},
{
"name": "JavaScript",
"bytes": "89023"
},
{
"name": "Python",
"bytes": "89233"
}
],
"symlink_target": ""
}
|
"""Provides the worker thread needed for processing streams."""
from fractions import Fraction
import io
import logging
from .const import AUDIO_SAMPLE_RATE
from .core import Segment, StreamBuffer
_LOGGER = logging.getLogger(__name__)
def generate_audio_frame():
"""Generate a blank audio frame."""
from av import AudioFrame
audio_frame = AudioFrame(format="dbl", layout="mono", samples=1024)
# audio_bytes = b''.join(b'\x00\x00\x00\x00\x00\x00\x00\x00'
# for i in range(0, 1024))
audio_bytes = b"\x00\x00\x00\x00\x00\x00\x00\x00" * 1024
audio_frame.planes[0].update(audio_bytes)
audio_frame.sample_rate = AUDIO_SAMPLE_RATE
audio_frame.time_base = Fraction(1, AUDIO_SAMPLE_RATE)
return audio_frame
def create_stream_buffer(stream_output, video_stream, audio_frame):
"""Create a new StreamBuffer."""
import av
a_packet = None
segment = io.BytesIO()
output = av.open(segment, mode="w", format=stream_output.format)
vstream = output.add_stream(template=video_stream)
# Check if audio is requested
astream = None
if stream_output.audio_codec:
astream = output.add_stream(stream_output.audio_codec, AUDIO_SAMPLE_RATE)
# Need to do it multiple times for some reason
while not a_packet:
a_packets = astream.encode(audio_frame)
if a_packets:
a_packet = a_packets[0]
return (a_packet, StreamBuffer(segment, output, vstream, astream))
def stream_worker(hass, stream, quit_event):
"""Handle consuming streams."""
import av
container = av.open(stream.source, options=stream.options)
try:
video_stream = container.streams.video[0]
except (KeyError, IndexError):
_LOGGER.error("Stream has no video")
return
audio_frame = generate_audio_frame()
first_packet = True
# Holds the buffers for each stream provider
outputs = {}
# Keep track of the number of segments we've processed
sequence = 1
# Holds the generated silence that needs to be muxed into the output
audio_packets = {}
# The presentation timestamp of the first video packet we receive
first_pts = 0
# The decoder timestamp of the latest packet we processed
last_dts = None
while not quit_event.is_set():
try:
packet = next(container.demux(video_stream))
if packet.dts is None:
if first_packet:
continue
# If we get a "flushing" packet, the stream is done
raise StopIteration("No dts in packet")
except (av.AVError, StopIteration) as ex:
# End of stream, clear listeners and stop thread
for fmt, _ in outputs.items():
hass.loop.call_soon_threadsafe(stream.outputs[fmt].put, None)
_LOGGER.error("Error demuxing stream: %s", str(ex))
break
# Skip non monotonically increasing dts in feed
if not first_packet and last_dts >= packet.dts:
continue
last_dts = packet.dts
# Reset timestamps from a 0 time base for this stream
packet.dts -= first_pts
packet.pts -= first_pts
# Reset segment on every keyframe
if packet.is_keyframe:
# Calculate the segment duration by multiplying the presentation
# timestamp by the time base, which gets us total seconds.
            # By then dividing by the sequence, we can calculate how long
# each segment is, assuming the stream starts from 0.
segment_duration = (packet.pts * packet.time_base) / sequence
# Save segment to outputs
for fmt, buffer in outputs.items():
buffer.output.close()
del audio_packets[buffer.astream]
if stream.outputs.get(fmt):
hass.loop.call_soon_threadsafe(
stream.outputs[fmt].put,
Segment(sequence, buffer.segment, segment_duration),
)
# Clear outputs and increment sequence
outputs = {}
if not first_packet:
sequence += 1
# Initialize outputs
for stream_output in stream.outputs.values():
if video_stream.name != stream_output.video_codec:
continue
a_packet, buffer = create_stream_buffer(
stream_output, video_stream, audio_frame
)
audio_packets[buffer.astream] = a_packet
outputs[stream_output.name] = buffer
# First video packet tends to have a weird dts/pts
if first_packet:
# If we are attaching to a live stream that does not reset
# timestamps for us, we need to do it ourselves by recording
# the first presentation timestamp and subtracting it from
# subsequent packets we receive.
if (packet.pts * packet.time_base) > 1:
first_pts = packet.pts
packet.dts = 0
packet.pts = 0
first_packet = False
# Store packets on each output
for buffer in outputs.values():
# Check if the format requires audio
if audio_packets.get(buffer.astream):
a_packet = audio_packets[buffer.astream]
a_time_base = a_packet.time_base
# Determine video start timestamp and duration
video_start = packet.pts * packet.time_base
video_duration = packet.duration * packet.time_base
if packet.is_keyframe:
# Set first audio packet in sequence to equal video pts
a_packet.pts = int(video_start / a_time_base)
a_packet.dts = int(video_start / a_time_base)
# Determine target end timestamp for audio
target_pts = int((video_start + video_duration) / a_time_base)
while a_packet.pts < target_pts:
# Mux audio packet and adjust points until target hit
buffer.output.mux(a_packet)
a_packet.pts += a_packet.duration
a_packet.dts += a_packet.duration
audio_packets[buffer.astream] = a_packet
# Assign the video packet to the new stream & mux
packet.stream = buffer.vstream
buffer.output.mux(packet)
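# Illustrative invocation sketch (the thread management shown here is an
# assumption; in Home Assistant the stream component owns the worker thread):
#
#   import threading
#   quit_event = threading.Event()
#   worker = threading.Thread(
#       target=stream_worker, args=(hass, stream, quit_event), daemon=True)
#   worker.start()
#   ...
#   quit_event.set()  # ask the worker to stop after the current packet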
|
{
"content_hash": "ac4462f268af342e5acf27f6804d422a",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 81,
"avg_line_length": 39.047904191616766,
"alnum_prop": 0.5907069467873025,
"repo_name": "fbradyirl/home-assistant",
"id": "e87221304a381cafcfa23172023513601cc50221",
"size": "6521",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/stream/worker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
}
|
import argparse
from django.core.management.base import AppCommand
from django_seed import Seed
from django_seed.exceptions import SeederCommandError
from toposort import toposort_flatten
from collections import defaultdict
class Command(AppCommand):
help = 'Seed your Django database with fake data.'
def add_arguments(self, parser: argparse.ArgumentParser):
super().add_arguments(parser)
help_text = 'The number of each model to seed (default 10).'
parser.add_argument('-n', '--number', action='store', nargs='?',
default=10, type=int, required=False,
help=help_text, dest='number')
help_text = ('Use this to specify the value a particular field should '
'have rather than seeding with Faker.')
parser.add_argument('--seeder', action='append', nargs=2,
required=False, type=str, help=help_text,
metavar=('model.field', 'value'), dest='seeder')
def handle_app_config(self, app_config, **options):
if app_config.models_module is None:
raise SeederCommandError('You must provide an app to seed')
try:
number = int(options['number'])
except ValueError:
raise SeederCommandError('The value of --number must be an integer')
# Gather seeders
seeders = defaultdict(dict)
self.stdout.write(f'Arguments: {options}', style_func=self.style.SUCCESS)
if options.get('seeder'):
for model_field, func in options['seeder']:
model, field = model_field.split('.')
seeders[model][field] = func
self.stdout.write(f'Forced model field: {model_field}, seeder value: {func}')
# Seed
seeder = Seed.seeder()
for model in self.sorted_models(app_config):
if model.__name__ in seeders:
seeder.add_entity(model, number, seeders[model.__name__])
else:
seeder.add_entity(model, number)
self.stdout.write('Seeding %i %ss' % (number, model.__name__))
generated = seeder.execute()
for model, pks in generated.items():
for pk in pks:
self.stdout.write(f"Model {model.__name__} generated record with primary key {pk}")
def get_model_dependencies(self, models):
dep_dict = {}
dep_class_map = {}
for model in models:
dependencies = set()
model_replacement = '{}.{}'.format(
model.__module__,
model.__name__
)
if model_replacement not in dep_class_map:
dep_class_map[model_replacement] = model
for field in model._meta.get_fields():
if ((field.many_to_one is True or field.many_to_many is True or field.one_to_one is True) and
field.concrete and field.blank is False):
related_model = field.related_model
related_model_type = '{}.{}'.format(
related_model.__module__,
related_model.__name__
)
replacement = related_model_type
if related_model_type not in dep_class_map:
dep_class_map[related_model_type] = related_model
dependencies.add(replacement)
dep_dict[model_replacement] = dependencies
return (dep_dict, dep_class_map)
def sorted_models(self, app_config):
dep_dict, dep_class_map = self.get_model_dependencies(app_config.get_models())
try:
return [dep_class_map[x] for x in toposort_flatten(dep_dict)]
except ValueError as ex:
raise SeederCommandError(str(ex))
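# Example invocations (app, model and field names are illustrative):
#
#   python manage.py seed myapp --number=15
#   python manage.py seed myapp --number=5 --seeder "Book.title" "Fixed title"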
|
{
"content_hash": "45204fd0f9890ef7606516b7506e294e",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 109,
"avg_line_length": 38.62,
"alnum_prop": 0.5657690315898498,
"repo_name": "Brobin/django-seed",
"id": "7f2fd2c5167ec30ea8c54ef3ea6663cf068e6951",
"size": "3862",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "django_seed/management/commands/seed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32470"
}
],
"symlink_target": ""
}
|
default_app_config = 'peps.apps.PepsAppConfig'
|
{
"content_hash": "45c339e5cd9250f832f1ca46077fec88",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 46,
"avg_line_length": 47,
"alnum_prop": 0.7872340425531915,
"repo_name": "proevo/pythondotorg",
"id": "c71df192599e8bf265507cbdd7af2fbbe6847875",
"size": "47",
"binary": false,
"copies": "3",
"ref": "refs/heads/dependabot/pip/django-allauth-0.51.0",
"path": "peps/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "711916"
},
{
"name": "JavaScript",
"bytes": "314514"
},
{
"name": "Makefile",
"bytes": "6811"
},
{
"name": "Python",
"bytes": "1448691"
},
{
"name": "Ruby",
"bytes": "218314"
},
{
"name": "Shell",
"bytes": "6730"
}
],
"symlink_target": ""
}
|
import sys
import lasagne.layers.pool as __cloned
from .base import bayes as __bayes
__module = sys.modules[__name__]
del sys
__all__ = []
for obj_name in __cloned.__all__:
try:
setattr(__module, obj_name, __bayes(getattr(__cloned, obj_name)))
__all__ += [obj_name]
except TypeError:
pass
# setattr(__module, obj_name, getattr(__cloned, obj_name))
|
{
"content_hash": "ea6e2cff0a5f78a6d2da8ef88cc7c1db",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 29.846153846153847,
"alnum_prop": 0.6005154639175257,
"repo_name": "ferrine/gelato",
"id": "8038663aa86d636e72d7378a79e489716f5ce0a0",
"size": "388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gelato/layers/pool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36929"
},
{
"name": "Shell",
"bytes": "955"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('crowdsourcing', '0006_batchfile_name'),
]
operations = [
migrations.CreateModel(
name='ModuleBatchFile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('batch_file', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='crowdsourcing.BatchFile')),
],
),
migrations.RenameField(
model_name='module',
old_name='template',
new_name='templates',
),
migrations.AddField(
model_name='modulebatchfile',
name='module',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='crowdsourcing.Module'),
),
migrations.AddField(
model_name='module',
name='batch_files',
field=models.ManyToManyField(through='crowdsourcing.ModuleBatchFile', to='crowdsourcing.BatchFile'),
),
]
|
{
"content_hash": "71c3a578f24451fa6de904e227c69d5b",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 125,
"avg_line_length": 33.19444444444444,
"alnum_prop": 0.596652719665272,
"repo_name": "aginzberg/crowdsource-platform",
"id": "4af09d3ca990c8d6eadf428e753adf7e98702de8",
"size": "1265",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop2",
"path": "crowdsourcing/migrations/0007_auto_20151208_1957.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "328687"
},
{
"name": "HTML",
"bytes": "178994"
},
{
"name": "JavaScript",
"bytes": "168588"
},
{
"name": "Python",
"bytes": "339941"
},
{
"name": "Shell",
"bytes": "838"
}
],
"symlink_target": ""
}
|
"""
mbed SDK
Copyright (c) 2014-2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from exporters import Exporter
from os.path import splitext, basename
from tools.targets import TARGETS
# collect the names of all targets that support the GCC_ARM toolchain
gccTargets = []
for t in TARGETS:
if 'GCC_ARM' in t.supported_toolchains:
gccTargets.append(t.name)
class IntermediateFile(Exporter):
NAME = 'EmBlocks'
TOOLCHAIN = 'GCC_ARM'
    # we support all GCC targets (target selection is handled on the IDE side)
TARGETS = gccTargets
MBED_CONFIG_HEADER_SUPPORTED = True
FILE_TYPES = {
'headers': 'h',
'c_sources': 'c',
's_sources': 'a',
'cpp_sources': 'cpp'
}
def generate(self):
self.resources.win_to_unix()
source_files = []
for r_type, n in IntermediateFile.FILE_TYPES.iteritems():
for file in getattr(self.resources, r_type):
source_files.append({
'name': file, 'type': n
})
libraries = []
for lib in self.resources.libraries:
l, _ = splitext(basename(lib))
libraries.append(l[3:])
if self.resources.linker_script is None:
self.resources.linker_script = ''
ctx = {
'name': self.project_name,
'target': self.target,
'toolchain': self.toolchain.name,
'source_files': source_files,
'include_paths': self.resources.inc_dirs,
'script_file': self.resources.linker_script,
'library_paths': self.resources.lib_dirs,
'libraries': libraries,
'symbols': self.toolchain.get_symbols(),
'object_files': self.resources.objects,
'sys_libs': self.toolchain.sys_libs,
'cc_org': self.flags['common_flags'] + self.flags['c_flags'],
'ld_org': self.flags['common_flags'] + self.flags['ld_flags'],
'cppc_org': self.flags['common_flags'] + self.flags['cxx_flags']
}
# EmBlocks intermediate file template
self.gen_file('emblocks.eix.tmpl', ctx, '%s.eix' % self.project_name)
|
{
"content_hash": "98d3195ae55f47bf526ecec32ce49a37",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 77,
"avg_line_length": 33.0375,
"alnum_prop": 0.6178584941354521,
"repo_name": "andreaslarssonublox/mbed",
"id": "9e2419945290621812d5e0d35cff086880d197ee",
"size": "2643",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/export/emblocks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5902788"
},
{
"name": "C",
"bytes": "166349251"
},
{
"name": "C++",
"bytes": "8338697"
},
{
"name": "CMake",
"bytes": "27635"
},
{
"name": "HTML",
"bytes": "1543876"
},
{
"name": "Makefile",
"bytes": "131072"
},
{
"name": "Objective-C",
"bytes": "166904"
},
{
"name": "Python",
"bytes": "21951"
},
{
"name": "Shell",
"bytes": "24790"
},
{
"name": "XSLT",
"bytes": "11192"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Category'
db.create_table('aw_category', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='children', null=True, to=orm['aw.Category'])),
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal('aw', ['Category'])
def backwards(self, orm):
# Deleting model 'Category'
db.delete_table('aw_category')
models = {
'aw.category': {
'Meta': {'ordering': "['tree_id', 'lft']", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['aw.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['aw']
|
{
"content_hash": "32f112efdf9bc3595c9bc5f23b341168",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 166,
"avg_line_length": 50.48837209302326,
"alnum_prop": 0.5914325195762321,
"repo_name": "vinilios/feincms-1",
"id": "5c6a4ad2e54e36f02104d934d93345959f3cecd6",
"size": "2195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aw/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from django.conf import settings
from django.http import HttpResponse
from django.utils import translation
from django.utils.translation import (ugettext_lazy as _, pgettext,
check_for_language)
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
from .gateway import validate_voice_locale
from ...compat import get_current_site
from ...views.utils import class_view_decorator
@class_view_decorator(never_cache)
@class_view_decorator(csrf_exempt)
class TwilioCallApp(View):
"""
View used by Twilio for the interactive token verification by phone.
"""
templates = {
'press_a_key': '<?xml version="1.0" encoding="UTF-8" ?>'
'<Response>'
' <Gather timeout="15" numDigits="1" finishOnKey="">'
' <Say language="%(locale)s">%(press_a_key)s</Say>'
' </Gather>'
' <Say language="%(locale)s">%(no_input)s</Say>'
'</Response>',
'token': '<?xml version="1.0" encoding="UTF-8" ?>'
'<Response>'
' <Say language="%(locale)s">%(token)s</Say>'
'</Response>',
}
prompts = {
# Translators: should be a language supported by Twilio,
# see http://bit.ly/187I5cr
'press_a_key': _('Hi, this is %(site_name)s calling. Press any key '
'to continue.'),
# Translators: should be a language supported by Twilio,
# see http://bit.ly/187I5cr
'token': _('Your token is %(token)s. Repeat: %(token)s. Good bye.'),
# Translators: should be a language supported by Twilio,
# see http://bit.ly/187I5cr
'no_input': _('You didn\'t press any keys. Good bye.')
}
def get(self, request, token):
return self.create_response(self.templates['press_a_key'])
def post(self, request, token):
return self.create_response(self.templates['token'])
def create_response(self, template):
with translation.override(self.get_locale()):
prompt_context = self.get_prompt_context()
template_context = dict((k, v % prompt_context) for k, v in self.prompts.items())
template_context['locale'] = self.get_twilio_locale()
return HttpResponse(template % template_context, 'text/xml')
def get_locale(self):
locale = self.request.GET.get('locale', '')
if not check_for_language(locale):
locale = settings.LANGUAGE_CODE
validate_voice_locale(locale)
return locale
def get_twilio_locale(self):
# Translators: twilio_locale should be a locale supported by
# Twilio, see http://bit.ly/187I5cr
return pgettext('twilio_locale', 'en')
def get_prompt_context(self):
return {
'site_name': get_current_site(self.request).name,
# Build the prompt. The numbers have to be clearly pronounced,
# this is by creating a string like "1. 2. 3. 4. 5. 6.", this way
# Twilio reads the numbers one by one.
'token': '. '.join(self.kwargs['token']) if self.request.method == 'POST' else '',
}
|
{
"content_hash": "49add39799bf29376c46d7d174395ceb",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 94,
"avg_line_length": 40.08433734939759,
"alnum_prop": 0.5873159002103998,
"repo_name": "percipient/django-two-factor-auth",
"id": "57f67b92370d47ad51f023b2eb813af3421ef82f",
"size": "3327",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "two_factor/gateways/twilio/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "20642"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Python",
"bytes": "158589"
}
],
"symlink_target": ""
}
|
from django.utils.encoding import smart_str
from haystack import models
from lazymodel import LazyModel
class SearchResult(models.SearchResult):
"""Extended SearchResult class for general purposes."""
def __getattr__(self, attr):
"""
The __getattr__ method of Haystack's SearchResult is too lenient.
This class will raise exceptions if an attribute is missing.
"""
if attr == '__getnewargs__':
raise AttributeError
try:
return self.__dict__[attr]
except KeyError:
raise AttributeError
def __str__(self):
return smart_str(unicode(self))
@property
def _meta(self):
return self.model._meta
@property
def id(self):
"""Return the database ID instead of the search ID."""
return self.pk
@property
def object(self):
if self._object is None:
self._object = LazyModel(self.model, self.pk)
return self._object
def get_identifier(self):
return self.__dict__['id']
def get_label(self):
return self.model.get_label()
class LazySearchResult(SearchResult):
"""Get missing attributes from the lazy/cached object."""
def __unicode__(self):
return unicode(self.object)
def __getattr__(self, attr):
return getattr(self.object, attr)
|
{
"content_hash": "a427126285fd2ef9b20f080e94eae2c0",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 73,
"avg_line_length": 23.655172413793103,
"alnum_prop": 0.6100583090379009,
"repo_name": "apnarm/django-apn-search",
"id": "122b29492e64801399c962fa8eedcee13155d43f",
"size": "1372",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apn_search/results.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82253"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.subsystems.zinc_language_mixin import ZincLanguageMixin
from pants.build_graph.address import Address
from pants.option.custom_types import target_option
from pants.subsystem.subsystem import Subsystem
class Java(ZincLanguageMixin, Subsystem):
"""A subsystem to encapsulate compile-time settings and features for the Java language.
Runtime options are captured by the JvmPlatform subsystem.
"""
options_scope = 'java'
@classmethod
def register_options(cls, register):
super(Java, cls).register_options(register)
register('--compiler-plugin-deps', advanced=True, type=list, member_type=target_option,
fingerprint=True,
help='Requested javac plugins will be found in these targets, as well as in any '
'dependencies of the targets being compiled.')
@classmethod
def global_plugin_dependency_specs(cls):
# TODO: This check is a hack to allow tests to pass without having to set up subsystems.
# We have hundreds of tests that use JvmTargets, either as a core part of the test, or
# incidentally when testing build graph functionality, and it would be onerous to make them
# all set up a subsystem they don't care about.
# See https://github.com/pantsbuild/pants/issues/3409.
if cls.is_initialized():
return cls.global_instance().plugin_dependency_specs()
else:
return []
def __init__(self, *args, **kwargs):
super(Java, self).__init__(*args, **kwargs)
opts = self.get_options()
# TODO: This check is a continuation of the hack that allows tests to pass without caring
# about this subsystem.
if hasattr(opts, 'compiler_plugin_deps'):
# Parse the specs in order to normalize them, so we can do string comparisons on them.
self._dependency_specs = [Address.parse(spec).spec
for spec in self.get_options().compiler_plugin_deps]
else:
self._dependency_specs = []
def plugin_dependency_specs(self):
return self._dependency_specs
|
{
"content_hash": "15cda9531f19717e9e1e6fcf2bf203a2",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 95,
"avg_line_length": 43.88,
"alnum_prop": 0.7000911577028259,
"repo_name": "gmalmquist/pants",
"id": "259c55eec687fe044cc426840de4952c7a427c8a",
"size": "2341",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/jvm/subsystems/java.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "437330"
},
{
"name": "JavaScript",
"bytes": "29992"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "5053630"
},
{
"name": "Scala",
"bytes": "84585"
},
{
"name": "Shell",
"bytes": "58748"
},
{
"name": "Thrift",
"bytes": "1966"
}
],
"symlink_target": ""
}
|
"""Goldstone unit test utilities."""
# Copyright 2015 Solinea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.contrib.auth import get_user_model
from django.conf import settings
from rest_framework.status import HTTP_200_OK
# Test URLs.
from rest_framework.test import APITransactionTestCase
LOGIN_URL = "/accounts/login/"
USER_URL = "/user/"
# Http response content used by multiple tests.
CONTENT_BAD_TOKEN = '{"detail":"Invalid token."}'
CONTENT_MISSING_PASSWORD = '"password":["This field is required."]'
CONTENT_MISSING_USERNAME = '"username":["This field is required."]'
CONTENT_NO_CREDENTIALS = \
'{"detail":"Authentication credentials were not provided."}'
CONTENT_NO_PERMISSION = \
'{"detail":"You do not have permission to perform this action."}'
CONTENT_NON_FIELD_ERRORS = \
'{"non_field_errors":["Unable to login with provided credentials."]}'
CONTENT_NOT_BLANK_USERNAME = '"username":["This field is required."]'
CONTENT_NOT_FOUND = "Not found."
CONTENT_PERMISSION_DENIED = '{"detail":"Permission denied."}'
CONTENT_UNIQUE_USERNAME = '{"username":["This field must be unique."]}'
CONTENT_UNIQUE_NAME = 'Tenant with this name already exists.'
# The payload string for the HTTP Authorization header.
AUTHORIZATION_PAYLOAD = "Token %s"
# Test data
TEST_USER_1 = ("fred", "fred@fred.com", "meh")
TEST_USER_2 = ("ginger", "ginger@ginger.com", "hem")
BAD_TOKEN = '4' * 40
BAD_UUID = '4' * 32
PAGE_SIZE = settings.REST_FRAMEWORK['PAGE_SIZE']
class Setup(APITransactionTestCase):
"""A base class to do housekeeping before each test."""
def setUp(self):
"""Do additional inter-test resetting."""
from goldstone.core import resource
get_user_model().objects.all().delete()
resource.types = resource.Types()
# Resource.instances may have been used before this test, so force it
# into a virgin state.
resource.instances._graph = None # pylint: disable=W0212
def login(username, password):
"""Log a user in.
This is for use on a login that is supposed to succeed. It checks the
system response with asserts before returning.
:param username: The username to use
:type username: str
:param password: The password to use
:type password: str
:return: If a successful login, the authorization token's value
:rtype: str
"""
from django.test import Client
# Log the user in, and return the auth token's value.
client = Client()
response = client.post(LOGIN_URL,
{"username": username, "password": password})
# pylint: disable=E1101
assert response.status_code == HTTP_200_OK
assert isinstance(response.data["auth_token"], basestring)
return response.data["auth_token"] # pylint: disable=E1101
def create_and_login(is_superuser=False, tenant=None,
user_template=TEST_USER_1):
"""Create a user and log her in.
:keyword is_superuser: Set the is_superuser flag in the User record?
(A.k.a., create a Django admin account?)
:type is_superuser: bool
:keyword tenant: If not None, make the user a tenant_admin of this tenant
:type tenant: Tenant
    :keyword user_template: Details of the user to create
    :type user_template: tuple
:return: The authorization token's value
:rtype: str
"""
# Create a user
user = get_user_model().objects.create_user(*user_template)
user.is_superuser = is_superuser
user.is_staff = is_superuser
if tenant:
user.tenant = tenant
user.tenant_admin = True
user.save()
return login(user_template[0], user_template[2])
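# Illustrative usage sketch (hypothetical test body, not part of this
# module): inside a Setup-based test case, create a superuser, log in, and
# call an authenticated endpoint.
#
#     token = create_and_login(is_superuser=True)
#     response = self.client.get(
#         USER_URL,
#         HTTP_AUTHORIZATION=AUTHORIZATION_PAYLOAD % token)
#     assert response.status_code == HTTP_200_OK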
def check_response_without_uuid(response, expected_status_code,
expected_content, uuid_under_results=False,
extra_keys=None):
"""Compare a response's content with expected content, without fully
comparing the "uuid" key.
A uuid is random. For responses that contain one, we confirm that the uuid
key exists and its string value is at least N digits long. If those checks
pass, we assume the uuid is correct, and then do an exact comparison of the
remainder of the response.
The uuid key may be, "UUID," or, "uuid".
The extra_keys hook allows the caller to specify the same treatment (except
for the length of the value) for arbitrary keys.
:param response: The HTTP response to be tested
:type response: django.test.client.Response
:param expected_status_code: The expected status code
:type expected_status_code: rest_framework.status.HTTP*
:param expected_content: The expected response.content, without a uuid key
:type expected_content: dict
:keyword uuid_under_results: If True, response.content contains a 'results'
key. Its value is a list of dicts, and each
dict contains a "uuid" key. If False,
response.content contains a "uuid" key.
:type uuid_under_results: bool
:keyword extra_keys: If not None, a list of keys. These keys will be
checked for their existence
:type extra_keys: list of str
"""
import json
def uuid_check(response_dict):
"""Check the uuid key and value."""
assert isinstance(response_dict.get("uuid", response_dict.get("UUID")),
basestring)
assert len(response_dict.get("uuid", response_dict.get("UUID"))) >= 32
assert response.status_code == expected_status_code
# Deserialize the response content to simplify checking.
response_content = json.loads(response.content)
if uuid_under_results:
# Look under the 'results' key for a list of dicts. Each dict must
# have a 'uuid' key.
for entry in response_content["results"]:
uuid_check(entry)
if "uuid" in entry:
del entry["uuid"]
else:
del entry["UUID"]
else:
# Look under response_content for the single 'uuid' key.
uuid_check(response_content)
if "uuid" in response_content:
del response_content["uuid"]
else:
del response_content["UUID"]
# Check the extra_keys for existence.
if extra_keys is not None:
for key in extra_keys:
assert key in response_content
del response_content[key]
if uuid_under_results:
# == compares nested dict values with order sensitivity, if the values
# are lists.
response_content["results"].sort()
expected_content["results"].sort()
# Now check every other key in the response.
assert response_content == expected_content
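# Illustrative usage sketch (the "response" object and expected body below
# are assumed, not part of this module): verify a creation reply while
# ignoring its randomly generated uuid.
#
#     check_response_without_uuid(response,
#                                 HTTP_200_OK,
#                                 {"name": "tenant 1", "owner_of": []},
#                                 extra_keys=["updated"])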
|
{
"content_hash": "7e0eb9c1fd7fb735d531f58ddc79df4d",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 79,
"avg_line_length": 35.92118226600985,
"alnum_prop": 0.6570213933077345,
"repo_name": "slashk/goldstone-server",
"id": "4b4c7319e6ddc13263f633d6120c42dee4591387",
"size": "7292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "goldstone/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "240381"
},
{
"name": "HTML",
"bytes": "42932"
},
{
"name": "JavaScript",
"bytes": "3919162"
},
{
"name": "Makefile",
"bytes": "7457"
},
{
"name": "Nginx",
"bytes": "643"
},
{
"name": "Python",
"bytes": "804488"
},
{
"name": "Ruby",
"bytes": "4717"
},
{
"name": "Shell",
"bytes": "49481"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from setuptools import setup, find_packages, Extension
import os
from distutils.errors import DistutilsSetupError
from distutils import log
from distutils.dep_util import newer_group
from distutils.core import Command
import pkg_resources
import platform
import sys
needs_pytest = {'pytest', 'test'}.intersection(sys.argv)
pytest_runner = ['pytest_runner'] if needs_pytest else []
needs_wheel = {'bdist_wheel'}.intersection(sys.argv)
wheel = ['wheel'] if needs_wheel else []
# use Cython if available, else try use pre-generated .cpp sources
cython_min_version = '0.24'
try:
pkg_resources.require("cython >= %s" % cython_min_version)
except pkg_resources.ResolutionError:
with_cython = False
log.info('Distribution mode: Compiling from Cython-generated .cpp sources.')
from setuptools.command.build_ext import build_ext
else:
with_cython = True
log.info('Development mode: Compiling Cython modules from .pyx sources.')
from Cython.Distutils import build_ext
class custom_build_ext(build_ext):
""" Custom 'build_ext' command which allows to pass compiler-specific
'extra_compile_args', 'define_macros' and 'undef_macros' options.
"""
def finalize_options(self):
build_ext.finalize_options(self)
if self.compiler is None:
# we use this variable with tox to build using GCC on Windows.
# https://bitbucket.org/hpk42/tox/issues/274/specify-compiler
self.compiler = os.environ.get("DISTUTILS_COMPILER", None)
if self.compiler == "mingw32":
            # workaround for virtualenv changing the order of library_dirs on
# Windows, which makes gcc fail to link with the correct libpython
# https://github.com/mingwpy/mingwpy.github.io/issues/31
self.library_dirs.insert(0, os.path.join(sys.exec_prefix, 'libs'))
def build_extension(self, ext):
sources = ext.sources
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'ext_modules' option (extension '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % ext.name)
sources = list(sources)
ext_path = self.get_ext_fullpath(ext.name)
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_path, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
# do compiler specific customizations
compiler_type = self.compiler.compiler_type
if isinstance(ext.extra_compile_args, dict):
extra_args_dict = ext.extra_compile_args or {}
if compiler_type in extra_args_dict:
extra_args = extra_args_dict[compiler_type]
else:
extra_args = extra_args_dict.get("default", [])
else:
extra_args = ext.extra_compile_args or []
if isinstance(ext.define_macros, dict):
macros_dict = ext.define_macros or {}
if compiler_type in macros_dict:
macros = macros_dict[compiler_type]
else:
macros = macros_dict.get("default", [])
else:
macros = ext.define_macros or []
if isinstance(ext.undef_macros, dict):
undef_macros_dict = ext.undef_macros
for tp, undef in undef_macros_dict.items():
if tp == compiler_type:
macros.append((undef,))
else:
for undef in ext.undef_macros:
macros.append((undef,))
# compile the source code to object files.
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=ext.depends)
# Now link the object files together into a "shared object"
if ext.extra_objects:
objects.extend(ext.extra_objects)
# TODO: do compiler-specific extra link args?
extra_args = ext.extra_link_args or []
# Detect target language, if not provided
language = ext.language or self.compiler.detect_language(sources)
self.compiler.link_shared_object(
objects, ext_path,
libraries=self.get_libraries(ext),
library_dirs=ext.library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=language)
extensions = [
Extension(
"compreffor._compreffor",
sources=[
os.path.join('src', 'cython', (
'_compreffor' + ('.pyx' if with_cython else '.cpp'))),
os.path.join('src', 'cxx', "cffCompressor.cc"),
],
depends=[os.path.join('src', 'cxx', 'cffCompressor.h')],
extra_compile_args={
"default": [
"-std=c++0x", "-pthread",
"-Wextra", "-Wno-unused", "-Wno-unused-parameter",
# pass extra compiler flags on OS X to enable support for C++11
] + (["-stdlib=libc++", "-mmacosx-version-min=10.7"]
if platform.system() == "Darwin" else []),
"msvc": ["/EHsc", "/Zi"],
},
define_macros={
# On Windows Python 2.7, pyconfig.h defines "hypot" as "_hypot",
# This clashes with GCC's cmath, and causes compilation errors when
# building under MinGW: http://bugs.python.org/issue11566
"mingw32": [("_hypot", "hypot")],
},
language="c++",
),
]
class PassCommand(Command):
""" This is used with Travis `dpl` tool so that it skips creating wheel
packages, and simply uploads to PyPI the ones that have been previously
built inside the manylinux1 docker container.
"""
description = "do nothing"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pass
with open('README.rst', 'r') as f:
long_description = f.read()
setup_params = dict(
name="compreffor",
version="0.4.1",
description="A CFF subroutinizer for fontTools.",
long_description=long_description,
author="Sam Fishman",
license="Apache 2.0",
package_dir={'': 'src/python'},
packages=find_packages('src/python'),
ext_modules=extensions,
cmdclass={
'build_ext': custom_build_ext,
'pass': PassCommand,
},
setup_requires=pytest_runner + wheel,
tests_require=[
'pytest>=2.8',
],
install_requires=[
"fonttools>=3.1",
],
entry_points={
'console_scripts': [
"compreffor = compreffor.__main__:main",
]
},
zip_safe=False,
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Multimedia :: Graphics",
"Topic :: Multimedia :: Graphics :: Graphics Conversion",
],
)
if __name__ == "__main__":
setup(**setup_params)
|
{
"content_hash": "c8bbab54fac4c5c750552ca3f277cf95",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 80,
"avg_line_length": 35.470588235294116,
"alnum_prop": 0.5813241484883276,
"repo_name": "anthrotype/compreffor",
"id": "2d3a6b37fb6cc728bcbd1548fafe179a8d026806",
"size": "7861",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "40787"
},
{
"name": "Makefile",
"bytes": "934"
},
{
"name": "Python",
"bytes": "82965"
},
{
"name": "Shell",
"bytes": "830"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/base/shared_base_tower.iff"
result.attribute_template_id = -1
result.stfName("building_name","base_tower")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "c7790440be6e43f572f024d564040b86",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 63,
"avg_line_length": 22.923076923076923,
"alnum_prop": 0.6912751677852349,
"repo_name": "obi-two/Rebelion",
"id": "ac79fd9d0f030f42b0cc894f115ad37898c710e5",
"size": "443",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/building/base/shared_base_tower.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""
Author: WFJ
Version: 0.1.0
FileName: setting_views.py
CreateTime: 2017-04-20 23:19
"""
from flask import render_template
from . import manage_blueprint as manage
@manage.route('/global-setting')
def global_setting():
return render_template('manage/global_setting.html')
|
{
"content_hash": "952ebe953e70773ba24e02e437fc7b34",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 56,
"avg_line_length": 25.25,
"alnum_prop": 0.6864686468646864,
"repo_name": "DreamWFJ/Everyeye",
"id": "d52b2a67b14d0875ff4c67af1fa08342ae6d028b",
"size": "350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/core/manage/setting_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "142471"
},
{
"name": "HTML",
"bytes": "528492"
},
{
"name": "JavaScript",
"bytes": "68111"
},
{
"name": "Python",
"bytes": "177576"
},
{
"name": "Shell",
"bytes": "5909"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
from rest_framework.renderers import (
CoreJSONRenderer, DocumentationRenderer, SchemaJSRenderer
)
from rest_framework.schemas import get_schema_view
def get_docs_view(title=None, description=None, schema_url=None, public=True):
renderer_classes = [DocumentationRenderer, CoreJSONRenderer]
return get_schema_view(
title=title,
url=schema_url,
description=description,
renderer_classes=renderer_classes,
public=public
)
def get_schemajs_view(title=None, description=None, schema_url=None, public=True):
renderer_classes = [SchemaJSRenderer]
return get_schema_view(
title=title,
url=schema_url,
description=description,
renderer_classes=renderer_classes,
public=public
)
def include_docs_urls(title=None, description=None, schema_url=None, public=True):
docs_view = get_docs_view(
title=title,
description=description,
schema_url=schema_url,
public=public
)
schema_js_view = get_schemajs_view(
title=title,
description=description,
schema_url=schema_url,
public=public
)
urls = [
url(r'^$', docs_view, name='docs-index'),
url(r'^schema.js$', schema_js_view, name='schema-js')
]
return include(urls, namespace='api-docs')
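# Typical wiring, shown for illustration only (the project urlconf below is
# assumed, not part of this module):
#
#     from django.conf.urls import url
#     from rest_framework.documentation import include_docs_urls
#
#     urlpatterns = [
#         url(r'^docs/', include_docs_urls(title='My API', public=False)),
#     ]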
|
{
"content_hash": "62f8d627aa7a8c1ddc4b1e587ae8b8cf",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 82,
"avg_line_length": 27.72,
"alnum_prop": 0.6594516594516594,
"repo_name": "raajitr/django_hangman",
"id": "3a56b7cb1deac4ec5cb11a58b347c9dc47319890",
"size": "1386",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/rest_framework/documentation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7707"
},
{
"name": "HTML",
"bytes": "9229"
},
{
"name": "JavaScript",
"bytes": "5043"
},
{
"name": "Python",
"bytes": "10324"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.cache import cache
from cms.admin import forms
from cms.admin.forms import (PageUserForm, PagePermissionInlineAdminForm,
ViewRestrictionInlineAdminForm, GlobalPagePermissionAdminForm,
PageUserGroupForm)
from cms.api import create_page, create_page_user, assign_user_to_page
from cms.forms.fields import PageSelectFormField, SuperLazyIterator
from cms.forms.utils import update_site_and_page_choices, get_site_choices, get_page_choices
from cms.models import ACCESS_PAGE, ACCESS_PAGE_AND_CHILDREN
from cms.test_utils.testcases import (
CMSTestCase, URL_CMS_PAGE_PERMISSION_CHANGE, URL_CMS_PAGE_PERMISSIONS
)
from cms.utils.permissions import set_current_user
class Mock_PageSelectFormField(PageSelectFormField):
def __init__(self, required=False):
# That's to have a proper mock object, without having to resort
# to dirtier tricks. We want to test *just* compress here.
self.required = required
self.error_messages = {}
self.error_messages['invalid_page'] = 'Invalid_page'
class FormsTestCase(CMSTestCase):
def setUp(self):
cache.clear()
def test_get_site_choices(self):
result = get_site_choices()
self.assertEqual(result, [])
def test_get_page_choices(self):
result = get_page_choices()
self.assertEqual(result, [('', '----')])
def test_get_site_choices_without_moderator(self):
result = get_site_choices()
self.assertEqual(result, [])
def test_get_site_choices_without_moderator_with_superuser(self):
# boilerplate (creating a page)
User = get_user_model()
fields = dict(is_staff=True, is_active=True, is_superuser=True, email="super@super.com")
if User.USERNAME_FIELD != 'email':
fields[User.USERNAME_FIELD] = "super"
user_super = User(**fields)
user_super.set_password(getattr(user_super, User.USERNAME_FIELD))
user_super.save()
with self.login_user_context(user_super):
create_page("home", "nav_playground.html", "en", created_by=user_super)
# The proper test
result = get_site_choices()
self.assertEqual(result, [(1, 'example.com')])
def test_compress_function_raises_when_page_is_none(self):
raised = False
try:
fake_field = Mock_PageSelectFormField(required=True)
            data_list = (0, None)  # (site_id, page_id); site_id is not used
fake_field.compress(data_list)
self.fail('compress function didn\'t raise!')
except forms.ValidationError:
raised = True
self.assertTrue(raised)
def test_compress_function_returns_none_when_not_required(self):
fake_field = Mock_PageSelectFormField(required=False)
        data_list = (0, None)  # (site_id, page_id); site_id is not used
result = fake_field.compress(data_list)
self.assertEqual(result, None)
def test_compress_function_returns_none_when_no_data_list(self):
fake_field = Mock_PageSelectFormField(required=False)
data_list = None
result = fake_field.compress(data_list)
self.assertEqual(result, None)
def test_compress_function_gets_a_page_when_one_exists(self):
# boilerplate (creating a page)
User = get_user_model()
fields = dict(is_staff=True, is_active=True, is_superuser=True, email="super@super.com")
if User.USERNAME_FIELD != 'email':
fields[User.USERNAME_FIELD] = "super"
user_super = User(**fields)
user_super.set_password(getattr(user_super, User.USERNAME_FIELD))
user_super.save()
with self.login_user_context(user_super):
home_page = create_page("home", "nav_playground.html", "en", created_by=user_super)
# The actual test
fake_field = Mock_PageSelectFormField()
        data_list = (0, home_page.pk)  # (site_id, page_id); site_id is not used
result = fake_field.compress(data_list)
self.assertEqual(home_page, result)
def test_update_site_and_page_choices(self):
Site.objects.all().delete()
site = Site.objects.create(domain='http://www.django-cms.org', name='Django CMS', pk=1)
page1 = create_page('Page 1', 'nav_playground.html', 'en', site=site)
page2 = create_page('Page 2', 'nav_playground.html', 'de', site=site)
page3 = create_page('Page 3', 'nav_playground.html', 'en',
site=site, parent=page1)
# enforce the choices to be casted to a list
site_choices, page_choices = [list(bit) for bit in update_site_and_page_choices('en')]
self.assertEqual(page_choices, [
('', '----'),
(site.name, [
(page1.pk, 'Page 1'),
(page3.pk, ' Page 3'),
(page2.pk, 'Page 2'),
])
])
self.assertEqual(site_choices, [(site.pk, site.name)])
def test_superlazy_iterator_behaves_properly_for_sites(self):
normal_result = get_site_choices()
lazy_result = SuperLazyIterator(get_site_choices)
self.assertEqual(normal_result, list(lazy_result))
def test_superlazy_iterator_behaves_properly_for_pages(self):
normal_result = get_page_choices()
lazy_result = SuperLazyIterator(get_page_choices)
self.assertEqual(normal_result, list(lazy_result))
def test_page_user_form_initial(self):
if get_user_model().USERNAME_FIELD == 'email':
myuser = get_user_model().objects.create_superuser("myuser", "myuser@django-cms.org",
"myuser@django-cms.org")
else:
myuser = get_user_model().objects.create_superuser("myuser", "myuser@django-cms.org", "myuser")
user = create_page_user(myuser, myuser, grant_all=True)
puf = PageUserForm(instance=user)
names = ['can_add_page', 'can_change_page', 'can_delete_page',
'can_add_pageuser', 'can_change_pageuser',
'can_delete_pageuser', 'can_add_pagepermission',
'can_change_pagepermission', 'can_delete_pagepermission']
for name in names:
self.assertTrue(puf.initial.get(name, False))
class PermissionFormTestCase(CMSTestCase):
def test_permission_forms(self):
page = create_page("page_b", "nav_playground.html", "en",
created_by=self.get_superuser())
normal_user = self._create_user("randomuser", is_staff=True, add_default_permissions=True)
assign_user_to_page(page, normal_user, can_view=True,
can_change=True)
with self.login_user_context(self.get_superuser()):
response = self.client.get(URL_CMS_PAGE_PERMISSION_CHANGE % page.pk)
self.assertEqual(response.status_code, 200)
response = self.client.get(URL_CMS_PAGE_PERMISSIONS % page.pk)
self.assertEqual(response.status_code, 200)
with self.settings(CMS_RAW_ID_USERS=True):
data = {
'page': page.pk,
'grant_on': "hello",
}
form = PagePermissionInlineAdminForm(data=data, files=None)
self.assertFalse(form.is_valid())
data = {
'page': page.pk,
'grant_on': ACCESS_PAGE,
}
form = PagePermissionInlineAdminForm(data=data, files=None)
self.assertTrue(form.is_valid())
form.save()
data = {
'page': page.pk,
'grant_on': ACCESS_PAGE_AND_CHILDREN,
'can_add': '1',
'can_change': ''
}
form = PagePermissionInlineAdminForm(data=data, files=None)
self.assertFalse(form.is_valid())
self.assertTrue('<li>Add page permission also requires edit page '
'permission.</li>' in str(form.errors))
data = {
'page': page.pk,
'grant_on': ACCESS_PAGE,
'can_add': '1',
}
form = PagePermissionInlineAdminForm(data=data, files=None)
self.assertFalse(form.is_valid())
            self.assertTrue('<li>Add page permission requires also access to children, or '
                            'descendants, otherwise added page can\'t be changed by its '
                            'creator.</li>' in str(form.errors))
def test_inlines(self):
user = self._create_user("randomuser", is_staff=True, add_default_permissions=True)
page = create_page("page_b", "nav_playground.html", "en",
created_by=self.get_superuser())
data = {
'page': page.pk,
'grant_on': ACCESS_PAGE_AND_CHILDREN,
'can_view': 'True',
'user': '',
'group': '',
}
set_current_user(self.get_superuser())
form = ViewRestrictionInlineAdminForm(data=data, files=None)
self.assertTrue(form.is_valid())
data = {
'page': page.pk,
'grant_on': ACCESS_PAGE_AND_CHILDREN,
'can_view': 'True',
'user': '',
'group': ''
}
form = GlobalPagePermissionAdminForm(data=data, files=None)
self.assertFalse(form.is_valid())
data = {
'page': page.pk,
'grant_on': ACCESS_PAGE_AND_CHILDREN,
'can_view': 'True',
'user': user.pk,
}
form = GlobalPagePermissionAdminForm(data=data, files=None)
self.assertTrue(form.is_valid())
def test_user_forms(self):
user = self.get_superuser()
user2 = self._create_user("randomuser", is_staff=True, add_default_permissions=True)
set_current_user(user)
data = {'username': "test",
'password': 'hello',
'password1': 'hello',
'password2': 'hello',
'created_by': user.pk,
'last_login': datetime.now(),
'date_joined': datetime.now(),
'email': 'test@example.com',
}
form = PageUserForm(data=data, files=None)
self.assertTrue(form.is_valid(), form.errors)
form.save()
data = {'username': "test2",
'password': 'hello',
'password1': 'hello',
'password2': 'hello',
'email': 'test2@example.com',
'created_by': user.pk,
'last_login': datetime.now(),
'date_joined': datetime.now(),
'notify_user': 'on',
}
form = PageUserForm(data=data, files=None, instance=user2)
self.assertTrue(form.is_valid(), form.errors)
form.save()
data = {
'name': 'test_group'
}
form = PageUserGroupForm(data=data, files=None)
self.assertTrue(form.is_valid(), form.errors)
instance = form.save()
form = PageUserGroupForm(data=data, files=None, instance=instance)
self.assertTrue(form.is_valid(), form.errors)
form.save()
|
{
"content_hash": "d1e3da3344b008f17b6f16fae88b1bbd",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 107,
"avg_line_length": 40.612099644128115,
"alnum_prop": 0.578776726253067,
"repo_name": "vxsx/django-cms",
"id": "fbf944d40b0e060c9e7ccbcee8669795b5aa29b6",
"size": "11436",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "cms/tests/test_forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "133419"
},
{
"name": "HTML",
"bytes": "154109"
},
{
"name": "JavaScript",
"bytes": "1172445"
},
{
"name": "Python",
"bytes": "1996894"
},
{
"name": "Shell",
"bytes": "28"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('game', '0005_auto_20161104_1713'),
]
operations = [
migrations.AlterField(
model_name='playergamescore',
name='starters',
field=models.CharField(choices=[('Y', 'Starter'), ('S', 'Substitutes'), ('NP', 'Not Play'), ('NA', 'N/A')], default='NA', max_length=2),
),
]
|
{
"content_hash": "70c3065bd33171dabb3e33a52c25a44e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 148,
"avg_line_length": 26.88888888888889,
"alnum_prop": 0.5764462809917356,
"repo_name": "vollov/ocbl.ca",
"id": "6bec7ee7210e11e3360d61d1c67354a808adb748",
"size": "555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/game/migrations/0006_auto_20161204_1818.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1985"
},
{
"name": "HTML",
"bytes": "24791"
},
{
"name": "Python",
"bytes": "103785"
}
],
"symlink_target": ""
}
|
import abc
from pymatgen.core.periodic_table import get_el_sp
from monty.json import MSONable
from pymatgen.analysis.structure_matcher import StructureMatcher,\
ElementComparator
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from collections import defaultdict
"""
This module defines filters for Transmuter object.
"""
__author__ = "Will Richards, Shyue Ping Ong, Stephen Dacek"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Will Richards"
__email__ = "wrichards@mit.edu"
__date__ = "Sep 25, 2012"
class AbstractStructureFilter(MSONable, metaclass=abc.ABCMeta):
"""
AbstractStructureFilter that defines an API to perform testing of
Structures. Structures that return True to a test are retained during
transmutation while those that return False are removed.
"""
@abc.abstractmethod
def test(self, structure):
"""
Method to execute the test.
Returns:
(bool) Structures that return true are kept in the Transmuter
object during filtering.
"""
return
class ContainsSpecieFilter(AbstractStructureFilter):
def __init__(self, species, strict_compare=False, AND=True, exclude=False):
"""
Filter for structures containing certain elements or species.
By default compares by atomic number
Args:
species ([Specie/Element]): list of species to look for
            AND: whether all species must be present to pass (or fail) the filter.
strict_compare: if true, compares objects by specie or element
object if false, compares atomic number
exclude: If true, returns false for any structures with the specie
(excludes them from the Transmuter)
"""
self._species = list(map(get_el_sp, species))
self._strict = strict_compare
self._AND = AND
self._exclude = exclude
def test(self, structure):
# set up lists to compare
if not self._strict:
# compare by atomic number
atomic_number = lambda x: x.Z
filter_set = set(map(atomic_number, self._species))
structure_set = set(map(atomic_number,
structure.composition.elements))
else:
# compare by specie or element object
filter_set = set(self._species)
structure_set = set(structure.composition.elements)
if self._AND and filter_set <= structure_set:
# return true if we aren't excluding since all are in structure
return not self._exclude
elif (not self._AND) and filter_set & structure_set:
# return true if we aren't excluding since one is in structure
return not self._exclude
else:
# return false if we aren't excluding otherwise
return self._exclude
def __repr__(self):
return "\n".join(["ContainsSpecieFilter with parameters:",
"species = {}".format(self._species),
"strict_compare = {}".format(self._strict),
"AND = {}".format(self._AND),
"exclude = {}".format(self._exclude)])
def as_dict(self):
return {"version": __version__, "@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"init_args": {"species": [str(sp) for sp in self._species],
"strict_compare": self._strict,
"AND": self._AND,
"exclude": self._exclude}}
@classmethod
def from_dict(cls, d):
return cls(**d["init_args"])
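# Illustrative sketch (the "structure" object is assumed): keep only
# structures containing both species, comparing by Specie objects rather
# than by atomic number.
#
#     f = ContainsSpecieFilter(["O2-", "Li+"], strict_compare=True, AND=True)
#     keep = f.test(structure)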
class SpecieProximityFilter(AbstractStructureFilter):
"""
This filter removes structures that have certain species that are too close
together.
Args:
specie_and_min_dist_dict: A species string to float mapping. For
example, {"Na+": 1} means that all Na+ ions must be at least 1
Angstrom away from each other. Multiple species criteria can be
            applied. Note that the testing is done based on the actual object.
            If you have a structure with Element, you must use {"Na": 1}
            instead to filter based on Element and not Specie.
"""
def __init__(self, specie_and_min_dist_dict):
self.specie_and_min_dist = {get_el_sp(k): v
for k, v
in specie_and_min_dist_dict.items()}
def test(self, structure):
all_species = set(self.specie_and_min_dist.keys())
for site in structure:
species = site.species.keys()
sp_to_test = set(species).intersection(all_species)
if sp_to_test:
max_r = max([self.specie_and_min_dist[sp]
for sp in sp_to_test])
nn = structure.get_neighbors(site, max_r)
for sp in sp_to_test:
for (nnsite, dist) in nn:
if sp in nnsite.species.keys():
if dist < self.specie_and_min_dist[sp]:
return False
return True
def as_dict(self):
return {"version": __version__, "@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"init_args": {"specie_and_min_dist_dict":
{str(sp): v
for sp, v in self.specie_and_min_dist.items()}}}
@classmethod
def from_dict(cls, d):
return cls(**d["init_args"])
class RemoveDuplicatesFilter(AbstractStructureFilter):
"""
This filter removes exact duplicate structures from the transmuter.
"""
def __init__(self, structure_matcher=StructureMatcher(
comparator=ElementComparator()), symprec=None):
"""
Remove duplicate structures based on the structure matcher
and symmetry (if symprec is given).
Args:
structure_matcher: Provides a structure matcher to be used for
structure comparison.
symprec: The precision in the symmetry finder algorithm if None (
default value), no symmetry check is performed and only the
structure matcher is used. A recommended value is 1e-5.
"""
self.symprec = symprec
self.structure_list = defaultdict(list)
if isinstance(structure_matcher, dict):
self.structure_matcher = StructureMatcher.from_dict(structure_matcher)
else:
self.structure_matcher = structure_matcher
def test(self, structure):
h = self.structure_matcher._comparator.get_hash(structure.composition)
if not self.structure_list[h]:
self.structure_list[h].append(structure)
return True
def get_sg(s):
finder = SpacegroupAnalyzer(s, symprec=self.symprec)
return finder.get_space_group_number()
for s in self.structure_list[h]:
if self.symprec is None or \
get_sg(s) == get_sg(structure):
if self.structure_matcher.fit(s, structure):
return False
self.structure_list[h].append(structure)
return True
class RemoveExistingFilter(AbstractStructureFilter):
"""
This filter removes structures existing in a given list from the transmuter.
"""
def __init__(self, existing_structures, structure_matcher=StructureMatcher(
comparator=ElementComparator()), symprec=None):
"""
Remove existing structures based on the structure matcher
and symmetry (if symprec is given).
Args:
existing_structures: List of existing structures to compare with
structure_matcher: Provides a structure matcher to be used for
structure comparison.
symprec: The precision in the symmetry finder algorithm if None (
default value), no symmetry check is performed and only the
structure matcher is used. A recommended value is 1e-5.
"""
self.symprec = symprec
self.structure_list = []
self.existing_structures = existing_structures
if isinstance(structure_matcher, dict):
self.structure_matcher = StructureMatcher.from_dict(structure_matcher)
else:
self.structure_matcher = structure_matcher
def test(self, structure):
def get_sg(s):
finder = SpacegroupAnalyzer(s, symprec=self.symprec)
return finder.get_space_group_number()
for s in self.existing_structures:
if self.structure_matcher._comparator.get_hash(structure.composition) ==\
self.structure_matcher._comparator.get_hash(s.composition):
if self.symprec is None or \
get_sg(s) == get_sg(structure):
if self.structure_matcher.fit(s, structure):
return False
self.structure_list.append(structure)
return True
def as_dict(self):
return {"version": __version__, "@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"init_args": {"structure_matcher": self.structure_matcher.as_dict()}}
class ChargeBalanceFilter(AbstractStructureFilter):
"""
This filter removes structures that are not charge balanced from the
transmuter. This only works if the structure is oxidation state
decorated, as structures with only elemental sites are automatically
assumed to have net charge of 0.
"""
def __init__(self):
pass
def test(self, structure):
if structure.charge == 0.0:
return True
else:
return False
class SpeciesMaxDistFilter(AbstractStructureFilter):
"""
    This filter removes structures in which two particular species are not
    within a predefined max_dist of each other. For instance, if you are
analyzing Li battery materials, you would expect that each Li+ would be
nearest neighbor to lower oxidation state transition metal for
electrostatic reasons. This only works if the structure is oxidation state
decorated, as structures with only elemental sites are automatically
assumed to have net charge of 0.
"""
def __init__(self, sp1, sp2, max_dist):
self.sp1 = get_el_sp(sp1)
self.sp2 = get_el_sp(sp2)
self.max_dist = max_dist
def test(self, structure):
sp1_indices = [i for i, site in enumerate(structure) if
site.specie == self.sp1]
sp2_indices = [i for i, site in enumerate(structure) if
site.specie == self.sp2]
fcoords = structure.frac_coords
fcoords1 = fcoords[sp1_indices, :]
fcoords2 = fcoords[sp2_indices, :]
lattice = structure.lattice
dists = lattice.get_all_distances(fcoords1, fcoords2)
return all([any(row) for row in dists < self.max_dist])
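# Illustrative sketch of applying the filters above by hand (the
# "structures" list is assumed; a Transmuter would invoke test() itself):
#
#     filters = [ChargeBalanceFilter(), RemoveDuplicatesFilter()]
#     kept = [s for s in structures if all(f.test(s) for f in filters)]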
|
{
"content_hash": "02532e70b89e75a77daaff4c050adbb6",
"timestamp": "",
"source": "github",
"line_count": 291,
"max_line_length": 85,
"avg_line_length": 38.580756013745706,
"alnum_prop": 0.5966865591876725,
"repo_name": "dongsenfo/pymatgen",
"id": "94e98899f561b0f47b11862a0418e8b80087b68e",
"size": "11337",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pymatgen/alchemy/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Dockerfile",
"bytes": "275"
},
{
"name": "HTML",
"bytes": "827"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "15152267"
},
{
"name": "Python",
"bytes": "7882640"
},
{
"name": "Roff",
"bytes": "1898220"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class KeyVaultAndSecretReference(Model):
"""Key Vault Secret Url and vault id of the encryption key .
All required parameters must be populated in order to send to Azure.
:param source_vault: Required. Resource id of the KeyVault containing the
key or secret
:type source_vault: ~azure.mgmt.compute.v2018_04_01.models.SourceVault
:param secret_url: Required. Url pointing to a key or secret in KeyVault
:type secret_url: str
"""
_validation = {
'source_vault': {'required': True},
'secret_url': {'required': True},
}
_attribute_map = {
'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'},
'secret_url': {'key': 'secretUrl', 'type': 'str'},
}
def __init__(self, *, source_vault, secret_url: str, **kwargs) -> None:
super(KeyVaultAndSecretReference, self).__init__(**kwargs)
self.source_vault = source_vault
self.secret_url = secret_url
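# Illustrative construction (the vault id and secret URL below are made-up
# placeholders; SourceVault is assumed importable from the same models
# package). Both arguments are required by the _validation map above.
#
#     ref = KeyVaultAndSecretReference(
#         source_vault=SourceVault(id="/subscriptions/.../vaults/myVault"),
#         secret_url="https://myvault.vault.azure.net/secrets/mySecret")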
|
{
"content_hash": "d2f55fe0ac6bed48c91132bc2c5f3f7b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 34.48275862068966,
"alnum_prop": 0.643,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "d342cfb37a14224f8011ffcf507a1aba5bd29e4e",
"size": "1474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/models/key_vault_and_secret_reference_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
"""Test the Z-Wave JS binary sensor platform."""
from zwave_js_server.event import Event
from zwave_js_server.model.node import Node
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_DOOR,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_TAMPER,
)
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
ENTITY_CATEGORY_DIAGNOSTIC,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from .common import (
DISABLED_LEGACY_BINARY_SENSOR,
ENABLED_LEGACY_BINARY_SENSOR,
LOW_BATTERY_BINARY_SENSOR,
NOTIFICATION_MOTION_BINARY_SENSOR,
PROPERTY_DOOR_STATUS_BINARY_SENSOR,
TAMPER_SENSOR,
)
from tests.common import MockConfigEntry
async def test_low_battery_sensor(hass, multisensor_6, integration):
"""Test boolean binary sensor of type low battery."""
state = hass.states.get(LOW_BATTERY_BINARY_SENSOR)
assert state
assert state.state == STATE_OFF
assert state.attributes["device_class"] == DEVICE_CLASS_BATTERY
registry = er.async_get(hass)
entity_entry = registry.async_get(LOW_BATTERY_BINARY_SENSOR)
assert entity_entry
assert entity_entry.entity_category == ENTITY_CATEGORY_DIAGNOSTIC
async def test_enabled_legacy_sensor(hass, ecolink_door_sensor, integration):
"""Test enabled legacy boolean binary sensor."""
node = ecolink_door_sensor
# this node has Notification CC not (fully) implemented
# so legacy binary sensor should be enabled
state = hass.states.get(ENABLED_LEGACY_BINARY_SENSOR)
assert state
assert state.state == STATE_OFF
assert state.attributes.get("device_class") is None
# Test state updates from value updated event
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 53,
"args": {
"commandClassName": "Binary Sensor",
"commandClass": 48,
"endpoint": 0,
"property": "Any",
"newValue": True,
"prevValue": False,
"propertyName": "Any",
},
},
)
node.receive_event(event)
state = hass.states.get(ENABLED_LEGACY_BINARY_SENSOR)
assert state.state == STATE_ON
async def test_disabled_legacy_sensor(hass, multisensor_6, integration):
"""Test disabled legacy boolean binary sensor."""
# this node has Notification CC implemented so legacy binary sensor should be disabled
registry = er.async_get(hass)
entity_id = DISABLED_LEGACY_BINARY_SENSOR
state = hass.states.get(entity_id)
assert state is None
entry = registry.async_get(entity_id)
assert entry
assert entry.disabled
assert entry.disabled_by == er.DISABLED_INTEGRATION
# Test enabling legacy entity
updated_entry = registry.async_update_entity(
entry.entity_id, **{"disabled_by": None}
)
assert updated_entry != entry
assert updated_entry.disabled is False
async def test_notification_sensor(hass, multisensor_6, integration):
"""Test binary sensor created from Notification CC."""
state = hass.states.get(NOTIFICATION_MOTION_BINARY_SENSOR)
assert state
assert state.state == STATE_ON
assert state.attributes["device_class"] == DEVICE_CLASS_MOTION
state = hass.states.get(TAMPER_SENSOR)
assert state
assert state.state == STATE_OFF
assert state.attributes["device_class"] == DEVICE_CLASS_TAMPER
registry = er.async_get(hass)
entity_entry = registry.async_get(TAMPER_SENSOR)
assert entity_entry
assert entity_entry.entity_category == ENTITY_CATEGORY_DIAGNOSTIC
async def test_notification_off_state(
hass: HomeAssistant,
lock_popp_electric_strike_lock_control: Node,
):
"""Test the description off_state attribute of certain notification sensors."""
node = lock_popp_electric_strike_lock_control
# Remove all other values except the door state value.
node.values = {
value_id: value
for value_id, value in node.values.items()
if value_id == "62-113-0-Access Control-Door state"
}
entry = MockConfigEntry(domain="zwave_js", data={"url": "ws://test.org"})
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
door_states = [
state
for state in hass.states.async_all("binary_sensor")
if state.attributes.get("device_class") == DEVICE_CLASS_DOOR
]
# Only one entity should be created for the Door state notification states.
assert len(door_states) == 1
state = door_states[0]
assert state
assert state.entity_id == "binary_sensor.node_62_access_control_window_door_is_open"
async def test_property_sensor_door_status(hass, lock_august_pro, integration):
"""Test property binary sensor with sensor mapping (doorStatus)."""
node = lock_august_pro
state = hass.states.get(PROPERTY_DOOR_STATUS_BINARY_SENSOR)
assert state
assert state.state == STATE_OFF
assert state.attributes["device_class"] == DEVICE_CLASS_DOOR
# open door
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 6,
"args": {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "doorStatus",
"newValue": "open",
"prevValue": "closed",
"propertyName": "doorStatus",
},
},
)
node.receive_event(event)
state = hass.states.get(PROPERTY_DOOR_STATUS_BINARY_SENSOR)
assert state
assert state.state == STATE_ON
# close door
event = Event(
type="value updated",
data={
"source": "node",
"event": "value updated",
"nodeId": 6,
"args": {
"commandClassName": "Door Lock",
"commandClass": 98,
"endpoint": 0,
"property": "doorStatus",
"newValue": "closed",
"prevValue": "open",
"propertyName": "doorStatus",
},
},
)
node.receive_event(event)
state = hass.states.get(PROPERTY_DOOR_STATUS_BINARY_SENSOR)
assert state
assert state.state == STATE_OFF
|
{
"content_hash": "d734e8ae602eb5bcc3c1b6fee0a8e1b3",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 90,
"avg_line_length": 31.22596153846154,
"alnum_prop": 0.6334103156274057,
"repo_name": "jawilson/home-assistant",
"id": "d5aab6cd0f9935bae80ec1adb6ffdcbdea1ebcc1",
"size": "6495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/components/zwave_js/test_binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import csv
filename = raw_input("What file to convert?: ")
copy = open("tanh_convert-" + filename, "wb")
writer = csv.writer(copy, delimiter=',', quotechar='', quoting=csv.QUOTE_NONE)
with open(filename, "rU") as f:
reader = csv.reader(f, delimiter=',', quotechar='', quoting=csv.QUOTE_NONE)
head=reader.next()
writer.writerow(head)
for row in reader:
if len(row)>3:
writer.writerow(row)
elif row[0]=="1":
writer.writerow(["1","-1"])
elif row[0]=="0":
writer.writerow(["-1","1"])
copy.close()
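# For illustration: a label-only row "1" is rewritten as the two-column
# target ["1", "-1"] and "0" as ["-1", "1"], mapping binary labels onto the
# tanh output range; rows with more than three fields pass through as-is.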
|
{
"content_hash": "e53d382e6ced18c7d6fafa0d714b3984",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 29.428571428571427,
"alnum_prop": 0.5857605177993528,
"repo_name": "adam-nnl/AFootball",
"id": "d2c545e83f8e19c445a860b3cedc07c8381500aa",
"size": "618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python Scripts/logsig-tanh-tsconverter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "280346"
}
],
"symlink_target": ""
}
|
import os
import os.path
import subprocess
from asp.avro_inter.py_avro_inter import *
import sys
class ScalaFunction:
def __init__(self, classname, source_dir):
self.classname = classname
self.source_dir = source_dir
def find_close(self,str):
index = len(str)-1
char = str[index]
while (char!=']'):
index -=1
char = str[index]
return index
def __call__(self, *args, **kwargs):
write_avro_file(args, 'args.avro')
prefix = os.environ['CLASSPATH']
class_path = prefix +':'+self.source_dir + ':/root/asp/asp/avro_inter'
# make_jar should be edited so that source.jar contains all the necessary files
# to be deployed to the slave nodes
os.system('/root/asp/asp/jit/make_source_jar '+ self.source_dir)
os.environ['SOURCE_LOC'] = self.source_dir + "/source.jar"
out = subprocess.Popen('/root/spark/run -cp '+class_path + ' ' +self.classname, shell=True)
out.wait()
if out.returncode != 0:
print "return code is:" , out.returncode
raise Exception("Bad return code")
results = read_avro_file('results.avro').next().values()[0]
os.remove('args.avro')
os.remove('results.avro')
return results
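# Illustrative call flow for ScalaFunction (class name and directory below are
# hypothetical, not taken from this repository): arguments are serialized to
# args.avro, the named Scala class is launched through the Spark runner, and
# its results.avro output is read back as the return value.
#
#     fn = ScalaFunction('com.example.GeneratedKernel', '/tmp/generated_scala')
#     squared = fn([1, 2, 3])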
class PseudoModule:
'''Pretends to be a Python module that contains the generated functions.'''
def __init__(self):
self.__dict__["__special_functions"] = {}
def __getattr__(self, name):
if name in self.__dict__["__special_functions"].keys():
return self.__dict__["__special_functions"][name]
else:
            raise AttributeError(name)
def __setattr__(self, name, value):
self.__dict__["__special_functions"][name] = value
class ScalaModule:
def __init__(self):
self.mod_body = []
self.init_body = []
def add_to_init(self, body):
self.init_body.extend([body])
def add_function(self):
# This is only for already compiled functions, I think
pass
def add_to_module(self, body):
self.mod_body.extend(body)
def add_to_preamble(self):
pass
def generate(self):
s = ""
for line in self.mod_body:
if type(line) != str:
raise Error("Not a string")
s += line
return s
def compile(self, toolchain, debug=True, cache_dir=None):
if cache_dir is None:
import tempfile
cache_dir = tempfile.gettempdir()
else:
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
source_string = self.generate()
hex_checksum = self.calculate_hex_checksum(source_string)
mod_cache_dir = os.path.join(cache_dir, hex_checksum)
# Should we assume that if the directory exists, then we don't need to
# recompile?
if not os.path.isdir(mod_cache_dir):
os.makedirs(mod_cache_dir)
filepath = os.path.join(mod_cache_dir, "asp_tmp.scala")
source = open(filepath, 'w')
source.write(source_string)
source.close()
result = os.system("scalac -optimise -d %s %s" % (mod_cache_dir, filepath))
os.remove(filepath)
if result != 0:
os.system("rm -rf " + mod_cache_dir)
raise Exception("Could not compile")
mod = PseudoModule()
for fname in self.init_body:
self.func = ScalaFunction(fname, mod_cache_dir)
setattr(mod, fname, self.func)
return mod
# Method borrowed from codepy.jit
def calculate_hex_checksum(self, source_string):
try:
import hashlib
checksum = hashlib.md5()
except ImportError:
            # for Python < 2.5
import md5
checksum = md5.new()
checksum.update(source_string)
#checksum.update(str(toolchain.abi_id()))
return checksum.hexdigest()
class ScalaToolchain:
pass
|
{
"content_hash": "6cb6e09464453b6ab632d9628e3b99ba",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 103,
"avg_line_length": 31.801526717557252,
"alnum_prop": 0.5516082573211714,
"repo_name": "pbirsinger/aspNew",
"id": "6eb082dc62bdf0f36d4a6f4e64c73388916bc9e9",
"size": "4166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "asp/jit/scala_module.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "11521"
},
{
"name": "Python",
"bytes": "389309"
},
{
"name": "Scala",
"bytes": "4285"
},
{
"name": "Shell",
"bytes": "6318"
}
],
"symlink_target": ""
}
|
import itertools
__all__ = ['Normal', 'Comment', 'Constant', 'Identifier', 'Statement',
'Define', 'Type', 'Special', 'Underlined', 'Error', 'Attention',
'Header', 'Regular', 'Italic', 'Bold', 'Underline', 'depth', 'fg',
'bg', 'Theme']
class _Face(tuple):
def __init__(self, *_):
super(_Face, self).__init__()
self.subtypes = set()
def __getattr__(self, value):
if not value or not value[0].isupper():
return tuple.__getattribute__(self, value)
face = _Face(self + (value,))
setattr(self, value, face)
self.subtypes.add(face)
return face
def __repr__(self):
return 'face' + (self and '-' or '') + '-'.join(self).lower()
Face = _Face()
Normal = Face.Normal
Comment = Face.Comment
Constant = Face.Constant
Identifier = Face.Identifier
Statement = Face.Statement
Define = Face.Define
Type = Face.Type
Special = Face.Special
Underlined = Face.Underlined
Error = Face.Error
Attention = Face.Attention
Header = Face.Header
class _Typeface(tuple):
# pylint: disable=W0231
def __init__(self, *_):
self.subtypes = set()
def __getattr__(self, value):
if not value or not value[0].isupper():
return tuple.__getattribute__(self, value)
tp = _Typeface(self + (value,))
setattr(self, value, tp)
self.subtypes.add(tp)
return tp
def __repr__(self):
return 'typeface' + (self and '-' or '') + '-'.join(self).lower()
Typeface = _Typeface()
Regular = Typeface.Regular
Italic = Typeface.Italic
Bold = Typeface.Bold
Underline = Typeface.Underline
class depth(int):
pass
class fg(int):
pass
class bg(int):
pass
class ThemeMeta(type):
def _process_definition(cls, fdef):
if type(fdef) is _Typeface:
return (Typeface, fdef)
elif type(fdef) is fg:
return (fg, fdef)
elif type(fdef) is bg:
return (bg, fdef)
else:
assert False, 'unknown definition %r' % fdef
def _process_color(cls, facesdefs):
face_attr = {}
for fdef in facesdefs:
attr = cls._process_definition(fdef)
face_attr[attr[0]] = attr[1]
return face_attr
def _process_face(cls, facesdefs, face):
face_colors = {}
for colors, fdefs in facesdefs[face].items():
assert type(colors) is depth
face_colors[colors] = cls._process_color(fdefs)
return face_colors
def process_faces(cls, name, facesdefs=None):
faces = {}
facesdefs = facesdefs or cls.faces[name]
for face in facesdefs.keys():
if type(face) is _Face:
faces[face] = cls._process_face(facesdefs, face)
else:
assert False, 'unknown face type %r' % face
return faces
def get_facesdefs(cls):
faces = {}
for c in itertools.chain((cls,), cls.__mro__):
fcs = c.__dict__.get('faces', {})
for name, components in fcs.items():
current = faces.get(name)
if current is None:
faces[name] = components
return faces
def __call__(cls, *args, **kwds):
if '_faces' not in cls.__dict__ or not cls.__dict__['_faces']:
# pylint: disable=W0201
cls._faces = cls.process_faces('', cls.get_facesdefs())
return type.__call__(cls, *args, **kwds)
class Theme(object):
'''Base class for all FlowUI themes
The Theme class provides all the basic functionality required to make a
FlowUI theme work together with a Terminal instance. The purpose is to
abstract as much as possible of the functionality so that the Theme only
has to define the colors relevant to it.
Controls:
clear-screen -- Clear the screen and reset the cursor
Typefaces:
regular
bold
italic
underline
Faces:
face-normal
face-comment
face-constant -- string, char, number etc constants
face-identifier -- variable/function name
face-statement -- statements (if, else, for etc)
face-define -- definitions (i.e. #define X)
face-type -- types (integer, static, struct etc)
face-special -- special symbols or characters
face-underlined -- text that stands out (i.e. links)
face-error
face-attention -- anything that needs extra attention
face-header -- section headers etc
'''
__metaclass__ = ThemeMeta
name = None
colors = []
faces = {}
def face(self, f, depth_):
'''Get the specified face'''
# pylint: disable=E1101
assert f in self._faces, 'face %r not found' % f
face = self._faces[f]
        assert depth_ in face, 'depth %r not available in %r' % (depth_, face)
return face[depth_]
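# Naming sketch (illustrative only, not part of the FlowUI API surface): faces
# and typefaces are built by chained attribute access on the module-level
# singletons and render as dash-separated lowercase names.
#
#     repr(Constant.String)                 # -> 'face-constant-string'
#     repr(Bold)                            # -> 'typeface-bold'
#     Constant.String in Constant.subtypes  # -> True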
|
{
"content_hash": "0c7322902b1292adde7ce4042924e280",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 78,
"avg_line_length": 26.410810810810812,
"alnum_prop": 0.5794105607859189,
"repo_name": "dholm/FlowUI",
"id": "05eb54b4d5e9f85652f5497ed4406fd565550c4d",
"size": "6476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flowui/theme.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "63028"
}
],
"symlink_target": ""
}
|
import unittest
from unittest import mock
from knack.util import CLIError
class TestNetworkUnitTests(unittest.TestCase):
def test_network_get_nic_ip_config(self):
from azure.cli.command_modules.network.custom import _get_nic_ip_config
# 1 - Test that if ip_configurations property is null, error is thrown
nic = mock.MagicMock()
nic.ip_configurations = None
with self.assertRaises(CLIError):
_get_nic_ip_config(nic, 'test')
def mock_ip_config(name, value):
fake = mock.MagicMock()
fake.name = name
fake.value = value
return fake
nic = mock.MagicMock()
nic.ip_configurations = [mock_ip_config('test1', '1'), mock_ip_config('test2', '2'),
mock_ip_config('test3', '3')]
# 2 - Test that if ip_configurations is not null but no match, error is thrown
with self.assertRaises(CLIError):
_get_nic_ip_config(nic, 'test4')
# 3 - Test that match is returned
self.assertEqual(_get_nic_ip_config(nic, 'test2').value, '2')
def test_network_upsert(self):
from azure.cli.core.commands import upsert_to_collection
obj1 = mock.MagicMock()
obj1.key = 'object1'
obj1.value = 'cat'
obj2 = mock.MagicMock()
obj2.key = 'object2'
obj2.value = 'dog'
# 1 - verify upsert to a null collection
parent_with_null_collection = mock.MagicMock()
parent_with_null_collection.collection = None
upsert_to_collection(parent_with_null_collection, 'collection', obj1, 'key')
result = parent_with_null_collection.collection
self.assertEqual(len(result), 1)
self.assertEqual(result[0].value, 'cat')
# 2 - verify upsert to an empty collection
parent_with_empty_collection = mock.MagicMock()
parent_with_empty_collection.collection = []
upsert_to_collection(parent_with_empty_collection, 'collection', obj1, 'key')
result = parent_with_empty_collection.collection
self.assertEqual(len(result), 1)
self.assertEqual(result[0].value, 'cat')
# 3 - verify can add more than one
upsert_to_collection(parent_with_empty_collection, 'collection', obj2, 'key')
result = parent_with_empty_collection.collection
self.assertEqual(len(result), 2)
self.assertEqual(result[1].value, 'dog')
# 4 - verify update to existing collection
obj2.value = 'noodle'
upsert_to_collection(parent_with_empty_collection, 'collection', obj2, 'key')
result = parent_with_empty_collection.collection
self.assertEqual(len(result), 2)
self.assertEqual(result[1].value, 'noodle')
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "bdfb479a10c458dc2c5f354bf1afcd1a",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 92,
"avg_line_length": 37.23684210526316,
"alnum_prop": 0.6240282685512367,
"repo_name": "yugangw-msft/azure-cli",
"id": "c48603c6ece9d6a65fce535df077c09feade1346",
"size": "3176",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/network/tests/hybrid_2018_03_01/test_network_unit_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from tekton import router
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
from book_app import facade
from routes.books import admin
@login_not_required
@no_csrf
def index():
cmd = facade.list_books_cmd()
books = cmd()
public_form = facade.book_public_form()
book_public_dcts = [public_form.fill_with_model(book) for book in books]
    context = {'books': book_public_dcts, 'admin_path': router.to_path(admin)}
return TemplateResponse(context)
|
{
"content_hash": "47a13673c434e108e3c87854e7b2ad65",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 76,
"avg_line_length": 32.89473684210526,
"alnum_prop": 0.7568,
"repo_name": "marcosxddh/aula_script",
"id": "31e4ab9bb9c714d3dba7ac06b3086b7709b67326",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/appengine/routes/books/home.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "42358"
},
{
"name": "C++",
"bytes": "3500"
},
{
"name": "CSS",
"bytes": "128629"
},
{
"name": "JavaScript",
"bytes": "4226"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "94796"
},
{
"name": "Shell",
"bytes": "4168"
}
],
"symlink_target": ""
}
|
"""Tests for `tf.data.experimental.parallel_interleave()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import threading
import time
import numpy as np
from six.moves import zip_longest
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ParallelInterleaveTest(test_base.DatasetTestBase):
def setUp(self):
self.error = None
self.repeat_count = 2
# Set up threading events used to sequence when items are produced that
# are subsequently interleaved. These events allow us to deterministically
# simulate slowdowns and force sloppiness.
self.read_coordination_events = {}
self.write_coordination_events = {}
# input values [4, 5, 6] are the common case for the tests; set defaults
for i in range(4, 7):
self.read_coordination_events[i] = threading.Semaphore(0)
self.write_coordination_events[i] = threading.Event()
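    # Coordination sketch (a summary of how the events above are used, not an
    # additional fixture): for each element x that map_py_fn produces,
    #   test thread:   write_coordination_events[x].set()     # unblock the worker
    #   worker thread: waits on that event, computes x * x, then calls
    #                  read_coordination_events[x].release()
    #   test thread:   read_coordination_events[x].acquire()  # observe completion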
def dataset_fn(self, input_values, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements):
def map_py_fn(x):
self.write_coordination_events[x].wait()
self.write_coordination_events[x].clear()
self.read_coordination_events[x].release()
if self.error:
err = self.error
self.error = None
raise err # pylint: disable=raising-bad-type
return x * x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(x)
return dataset.map(map_fn)
return dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
self.repeat_count).apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements))
def _interleave(self, lists, cycle_length, block_length):
"""Python implementation of interleave used for testing."""
num_open = 0
# `all_iterators` acts as a queue of iterators over each element of `lists`.
all_iterators = [iter(l) for l in lists]
# `open_iterators` are the iterators whose elements are currently being
# interleaved.
open_iterators = []
for i in range(cycle_length):
if all_iterators:
open_iterators.append(all_iterators.pop(0))
num_open += 1
else:
open_iterators.append(None)
while num_open or all_iterators:
for i in range(cycle_length):
if open_iterators[i] is None:
if all_iterators:
open_iterators[i] = all_iterators.pop(0)
num_open += 1
else:
continue
for _ in range(block_length):
try:
yield next(open_iterators[i])
except StopIteration:
open_iterators[i] = None
num_open -= 1
break
def testPythonImplementation(self):
input_lists = [[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6],
[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]]
# Cycle length 1 acts like `Dataset.flat_map()`.
expected_elements = itertools.chain(*input_lists)
for expected, produced in zip(expected_elements,
self._interleave(input_lists, 1, 1)):
self.assertEqual(expected, produced)
# Cycle length > 1.
expected_elements = [
4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5, 6, 5, 6, 5,
6, 5, 6, 5, 6, 6
]
for index, (expected, produced) in enumerate(
zip_longest(expected_elements, self._interleave(input_lists, 2, 1))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def testPythonImplementationBlockLength(self):
input_lists = [[4] * 4, [5] * 5, [6] * 6] * 2
expected_elements = [
4, 4, 5, 5, 4, 4, 5, 5, 5, 6, 6, 4, 4, 6, 6, 4, 4, 6, 6, 5, 5, 6, 6, 5,
5, 6, 6, 5, 6, 6
]
for index, (expected, produced) in enumerate(
zip_longest(expected_elements, self._interleave(input_lists, 2, 2))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def testPythonImplementationEmptyLists(self):
input_lists = [[4, 4, 4, 4], [], [6, 6, 6, 6, 6, 6], [4, 4, 4, 4], [],
[6, 6, 6, 6, 6, 6]]
expected_elements = [
4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6
]
for index, (expected, produced) in enumerate(
zip_longest(expected_elements, self._interleave(input_lists, 2, 1))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def _clear_coordination_events(self):
for i in range(4, 7):
self.read_coordination_events[i] = threading.Semaphore(0)
self.write_coordination_events[i].clear()
def _allow_all_map_threads(self):
for i in range(4, 7):
self.write_coordination_events[i].set()
def _testSingleThreaded(self, sloppy=False, prefetch_input_elements=0):
# cycle_length=1,block_length=1 acts like `Dataset.interleave()` and
# `Dataset.flat_map()` and is single-threaded. No synchronization required.
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=1,
block_length=1,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=prefetch_input_elements))
for expected_element in self._interleave(
[[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 1):
self.write_coordination_events[expected_element].set()
self.assertEqual(expected_element * expected_element,
self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testSingleThreaded(self):
self._testSingleThreaded()
def testSingleThreadedSloppy(self):
self._testSingleThreaded(sloppy=True)
def testSingleThreadedPrefetch1Itr(self):
self._testSingleThreaded(prefetch_input_elements=1)
def testSingleThreadedPrefetch1ItrSloppy(self):
self._testSingleThreaded(prefetch_input_elements=1, sloppy=True)
def testSingleThreadedRagged(self):
# Tests a sequence with wildly different elements per iterator.
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([3, 7, 4]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=1))
# Add coordination values for 3 and 7
self.read_coordination_events[3] = threading.Semaphore(0)
self.write_coordination_events[3] = threading.Event()
self.read_coordination_events[7] = threading.Semaphore(0)
self.write_coordination_events[7] = threading.Event()
for expected_element in self._interleave(
[[3] * 3, [7] * 7, [4] * 4] * self.repeat_count, 2, 1):
self.write_coordination_events[expected_element].set()
output = self.evaluate(next_element())
self.assertEqual(expected_element * expected_element, output)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def _testTwoThreadsNoContention(self, sloppy=False):
# num_threads > 1.
# Explicit coordination should result in `Dataset.interleave()` behavior
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=1))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
1)):
self.write_coordination_events[expected_element].set()
if done_first_event: # First event starts the worker threads.
self.read_coordination_events[expected_element].acquire()
actual_element = self.evaluate(next_element())
if not done_first_event:
self.read_coordination_events[expected_element].acquire()
done_first_event = True
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testTwoThreadsNoContention(self):
self._testTwoThreadsNoContention()
def testTwoThreadsNoContentionSloppy(self):
self._testTwoThreadsNoContention(sloppy=True)
def _testTwoThreadsNoContentionWithRaces(self, sloppy=False):
"""Tests where all the workers race in producing elements.
Note: this is in contrast with the previous test which carefully sequences
the execution of the map functions.
Args:
sloppy: Whether to be sloppy or not.
"""
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=1))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
1)):
if done_first_event: # First event starts the worker threads.
self._allow_all_map_threads()
self.read_coordination_events[expected_element].acquire()
else:
self.write_coordination_events[expected_element].set()
time.sleep(0.5) # Sleep to consistently "avoid" the race condition.
actual_element = self.evaluate(next_element())
if not done_first_event:
done_first_event = True
self.assertTrue(
self.read_coordination_events[expected_element].acquire(False))
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testTwoThreadsNoContentionWithRaces(self):
self._testTwoThreadsNoContentionWithRaces()
def testTwoThreadsNoContentionWithRacesSloppy(self):
self._testTwoThreadsNoContentionWithRaces(sloppy=True)
def _testTwoThreadsNoContentionBlockLength(self, sloppy=False):
# num_threads > 1.
# Explicit coordination should result in `Dataset.interleave()` behavior
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=2,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=1))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
2)):
self.write_coordination_events[expected_element].set()
if done_first_event: # First event starts the worker threads.
self.read_coordination_events[expected_element].acquire()
actual_element = self.evaluate(next_element())
if not done_first_event:
done_first_event = True
self.read_coordination_events[expected_element].acquire()
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testTwoThreadsNoContentionBlockLength(self):
self._testTwoThreadsNoContentionBlockLength()
def testTwoThreadsNoContentionBlockLengthSloppy(self):
self._testTwoThreadsNoContentionBlockLength(sloppy=True)
def _testTwoThreadsNoContentionWithRacesAndBlocking(self, sloppy=False):
"""Tests where all the workers race in producing elements.
Note: this is in contrast with the previous test which carefully sequences
the execution of the map functions.
Args:
sloppy: Whether to be sloppy or not.
"""
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=2,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=1))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
2)):
if done_first_event: # First event starts the worker threads.
self._allow_all_map_threads()
self.read_coordination_events[expected_element].acquire()
else:
self.write_coordination_events[expected_element].set()
time.sleep(0.5) # Sleep to consistently "avoid" the race condition.
actual_element = self.evaluate(next_element())
if not done_first_event:
done_first_event = True
self.assertTrue(
self.read_coordination_events[expected_element].acquire(False))
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testTwoThreadsNoContentionWithRacesAndBlocking(self):
self._testTwoThreadsNoContentionWithRacesAndBlocking()
def testTwoThreadsNoContentionWithRacesAndBlockingSloppy(self):
self._testTwoThreadsNoContentionWithRacesAndBlocking(sloppy=True)
def _testEmptyInput(self, sloppy=False):
# Empty input.
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([]),
cycle_length=2,
block_length=3,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=0))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testEmptyInput(self):
self._testEmptyInput()
def testEmptyInputSloppy(self):
self._testEmptyInput(sloppy=True)
def _testNonEmptyInputIntoEmptyOutputs(self, sloppy=False):
# Non-empty input leading to empty output.
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([0, 0, 0]),
cycle_length=2,
block_length=3,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=0))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testNonEmptyInputIntoEmptyOutputs(self):
self._testNonEmptyInputIntoEmptyOutputs()
def testNonEmptyInputIntoEmptyOutputsSloppy(self):
self._testNonEmptyInputIntoEmptyOutputs(sloppy=True)
def _testPartiallyEmptyOutputs(self, sloppy=False, prefetch_input_elements=1):
race_indices = {2, 8, 14} # Sequence points when sloppy mode has race conds
# Mixture of non-empty and empty interleaved datasets.
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 0, 6]),
cycle_length=2,
block_length=1,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=prefetch_input_elements))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [], [6] * 6] * self.repeat_count, 2, 1)):
self.write_coordination_events[expected_element].set()
# First event starts the worker threads. Additionally, when running the
# sloppy case with prefetch_input_elements=0, we get stuck if we wait
# for the read coordination event for certain event orderings in the
# presence of finishing iterators.
if done_first_event and not (sloppy and (i in race_indices)):
self.read_coordination_events[expected_element].acquire()
actual_element = self.evaluate(next_element())
if not done_first_event or (sloppy and (i in race_indices)):
done_first_event = True
self.read_coordination_events[expected_element].acquire()
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
def testPartiallyEmptyOutputs(self):
self._testPartiallyEmptyOutputs()
def testPartiallyEmptyOutputsSloppy(self):
self._testPartiallyEmptyOutputs(sloppy=True, prefetch_input_elements=0)
def testDelayedOutputSloppy(self):
# Explicitly control the sequence of events to ensure we correctly avoid
# head-of-line blocking.
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=True,
buffer_output_elements=1,
prefetch_input_elements=0))
mis_ordering = [
4, 4, 5, 4, 5, 5, 4, 5, 6, 6, 6, 5, 4, 4, 6, 6, 4, 4, 6, 5, 6, 6, 6, 6,
5, 5, 5, 5, 6, 6
]
for element in mis_ordering:
self.write_coordination_events[element].set()
self.assertEqual(element * element, self.evaluate(next_element()))
self.assertTrue(self.read_coordination_events[element].acquire(False))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testBlockLengthWithContentionSloppy(self):
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=True,
buffer_output_elements=1,
prefetch_input_elements=1))
# Test against a generating sequence that differs from the uncontended
# case, in order to prove sloppy correctness.
for i, expected_element in enumerate(
self._interleave(
[[4] * 4, [5] * 5, [6] * 6] * self.repeat_count,
cycle_length=2,
block_length=3)):
self.write_coordination_events[expected_element].set()
if done_first_event: # First event starts the worker threads.
self.read_coordination_events[expected_element].acquire()
actual_element = self.evaluate(next_element())
if not done_first_event:
self.read_coordination_events[expected_element].acquire()
done_first_event = True
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def _testEarlyExit(self, sloppy=False):
# Exiting without consuming all input should not block
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=3,
block_length=2,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=0))
for i in range(4, 7):
self.write_coordination_events[i].set()
elem = self.evaluate(next_element()) # Start all workers
# Allow the one successful worker to progress beyond the py_func again.
elem = int(math.sqrt(elem))
self.write_coordination_events[elem].set()
self.read_coordination_events[elem].acquire()
# Allow the prefetch to succeed
for i in range(4, 7):
self.read_coordination_events[i].acquire()
self.write_coordination_events[i].set()
def testEarlyExit(self):
self._testEarlyExit()
def testEarlyExitSloppy(self):
self._testEarlyExit(sloppy=True)
def _testTooManyReaders(self, sloppy=False):
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(math_ops.cast(x, dtype=dtypes.int64))
return dataset
dataset = dataset_ops.Dataset.from_tensor_slices([4, 5, 6])
dataset = dataset.repeat(self.repeat_count)
dataset = dataset.apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length=16, block_length=2, sloppy=sloppy))
get_next = self.getNext(dataset)
output_values = []
for _ in range(30):
output_values.append(self.evaluate(get_next()))
expected_values = self._interleave(
[[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 2)
self.assertItemsEqual(output_values, expected_values)
def testTooManyReaders(self):
self._testTooManyReaders()
def testTooManyReadersSloppy(self):
self._testTooManyReaders(sloppy=True)
def testSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
dataset = dataset_ops.Dataset.range(10).map(_map_fn).apply(
interleave_ops.parallel_interleave(_interleave_fn, cycle_length=1))
get_next = self.getNext(dataset)
for i in range(10):
for j in range(2):
expected = [i, 0] if j % 2 == 0 else [0, -i]
self.assertAllEqual(expected, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
def testErrorsInOutputFn(self):
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
except_on_element_indices = set([3])
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
1)):
if i in except_on_element_indices:
self.error = ValueError()
self.write_coordination_events[expected_element].set()
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element())
else:
self.write_coordination_events[expected_element].set()
actual_element = self.evaluate(next_element())
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testErrorsInInputFn(self):
def map_py_fn(x):
if x == 5:
raise ValueError()
return x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(x)
return dataset
def dataset_fn(input_values, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements):
return dataset_ops.Dataset.from_tensor_slices(input_values).map(
map_fn).repeat(self.repeat_count).apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements))
next_element = self.getNext(
dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)):
if expected_element == 5:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element())
else:
actual_element = self.evaluate(next_element())
self.assertEqual(
expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testErrorsInInterleaveFn(self):
def map_py_fn(x):
if x == 5:
raise ValueError()
return x
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
y = script_ops.py_func(map_py_fn, [x], x.dtype)
dataset = dataset.repeat(y)
return dataset
def dataset_fn(input_values, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements):
return dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
self.repeat_count).apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements))
next_element = self.getNext(
dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)):
if expected_element == 5:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element())
else:
actual_element = self.evaluate(next_element())
self.assertEqual(
expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def testShutdownRace(self):
dataset = dataset_ops.Dataset.range(20)
map_fn = lambda x: dataset_ops.Dataset.range(20 * x, 20 * (x + 1))
dataset = dataset.apply(
interleave_ops.parallel_interleave(
map_fn,
cycle_length=3,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
dataset = dataset.batch(32)
results = []
for _ in range(2):
elements = []
next_element = self.getNext(dataset)
try:
while True:
elements.extend(self.evaluate(next_element()))
except errors.OutOfRangeError:
pass
results.append(elements)
self.assertAllEqual(results[0], results[1])
if __name__ == "__main__":
test.main()
|
{
"content_hash": "d2144e7a3def939bbf9e1e1e325d8acd",
"timestamp": "",
"source": "github",
"line_count": 734,
"max_line_length": 80,
"avg_line_length": 38.42234332425068,
"alnum_prop": 0.6264803914615985,
"repo_name": "theflofly/tensorflow",
"id": "9d535316619db395853e83e3c1b2a740965b9f7d",
"size": "28891",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/experimental/kernel_tests/parallel_interleave_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644154"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59546729"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1507157"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908330"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94633"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15108"
},
{
"name": "Pascal",
"bytes": "770"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46310564"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481712"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
}
|
"""Dependencies vendoring helpers."""
import os.path
import site
import sys
PYTHON_VERSION = 'python%d.%d' % (sys.version_info[0], sys.version_info[1])
def add(path, index=1):
"""Insert site dir or virtualenv at a given index in sys.path.
Args:
path: relative path to a site dir or virtualenv.
index: sys.path position to insert the site dir.
Raises:
ValueError: path doesn't exist.
"""
venv_path = os.path.join(path, 'lib', PYTHON_VERSION, 'site-packages')
if os.path.isdir(venv_path):
site_dir = venv_path
elif os.path.isdir(path):
site_dir = path
else:
raise ValueError('virtualenv: cannot access %s: '
'No such virtualenv or site directory' % path)
sys_path = sys.path[:]
del sys.path[index:]
site.addsitedir(site_dir)
sys.path.extend(sys_path[index:])
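# Usage sketch (the 'lib' directory name below is only an example): this helper
# is normally called from a project's appengine_config.py so that bundled
# third-party packages, or a virtualenv, take precedence on sys.path.
#
#     from google.appengine.ext import vendor
#     vendor.add('lib')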
|
{
"content_hash": "31818c55d0c08bab60b38c8f65deb8e0",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 75,
"avg_line_length": 23.885714285714286,
"alnum_prop": 0.6602870813397129,
"repo_name": "Serag8/Bachelor",
"id": "58a23f67bff123105ddbc49b50f67d2d7a56c54e",
"size": "1437",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "google_appengine/google/appengine/ext/vendor/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5005"
},
{
"name": "C",
"bytes": "407880"
},
{
"name": "CSS",
"bytes": "218414"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "HTML",
"bytes": "611572"
},
{
"name": "JavaScript",
"bytes": "2948926"
},
{
"name": "Makefile",
"bytes": "13710"
},
{
"name": "PHP",
"bytes": "2556595"
},
{
"name": "Python",
"bytes": "39016417"
},
{
"name": "Shell",
"bytes": "3680"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
"""Contains classes for SymmetricDelete."""
from sparknlp.common import *
from sparknlp.common.annotator_type import AnnotatorType
class SymmetricDeleteApproach(AnnotatorApproach):
"""Trains a Symmetric Delete spelling correction algorithm. Retrieves tokens
and utilizes distance metrics to compute possible derived words.
The Symmetric Delete spelling correction algorithm reduces the complexity of edit
candidate generation and dictionary lookup for a given Damerau-Levenshtein distance.
It is six orders of magnitude faster (than the standard approach with deletes +
transposes + replaces + inserts) and language independent.
A dictionary of correct spellings must be provided with :meth:`.setDictionary` in
the form of a text file, where each word is parsed by a regex pattern.
For instantiated/pretrained models, see :class:`.SymmetricDeleteModel`.
====================== ======================
Input Annotation types Output Annotation type
====================== ======================
``TOKEN`` ``TOKEN``
====================== ======================
Parameters
----------
dictionary
folder or file with text that teaches about the language
maxEditDistance
max edit distance characters to derive strings from a word, by default 3
frequencyThreshold
minimum frequency of words to be considered from training, by default 0
deletesThreshold
minimum frequency of corrections a word needs to have to be considered
from training, by default 0
References
----------
Inspired by `SymSpell <https://github.com/wolfgarbe/SymSpell>`__.
Examples
--------
In this example, the dictionary ``"words.txt"`` has the form of::
...
gummy
gummic
gummier
gummiest
gummiferous
...
This dictionary is then set to be the basis of the spell checker.
>>> import sparknlp
>>> from sparknlp.base import *
>>> from sparknlp.annotator import *
>>> from pyspark.ml import Pipeline
>>> documentAssembler = DocumentAssembler() \\
... .setInputCol("text") \\
... .setOutputCol("document")
>>> tokenizer = Tokenizer() \\
... .setInputCols(["document"]) \\
... .setOutputCol("token")
>>> spellChecker = SymmetricDeleteApproach() \\
... .setInputCols(["token"]) \\
... .setOutputCol("spell") \\
... .setDictionary("src/test/resources/spell/words.txt")
>>> pipeline = Pipeline().setStages([
... documentAssembler,
... tokenizer,
... spellChecker
... ])
>>> pipelineModel = pipeline.fit(trainingData)
See Also
--------
NorvigSweetingApproach : for an alternative approach to spell checking
ContextSpellCheckerApproach : for a DL based approach
"""
inputAnnotatorTypes = [AnnotatorType.TOKEN]
corpus = Param(Params._dummy(),
"corpus",
"folder or file with text that teaches about the language",
typeConverter=TypeConverters.identity)
dictionary = Param(Params._dummy(),
"dictionary",
"folder or file with text that teaches about the language",
typeConverter=TypeConverters.identity)
maxEditDistance = Param(Params._dummy(),
"maxEditDistance",
"max edit distance characters to derive strings from a word",
typeConverter=TypeConverters.toInt)
frequencyThreshold = Param(Params._dummy(),
"frequencyThreshold",
"minimum frequency of words to be considered from training. " +
"Increase if training set is LARGE. Defaults to 0",
typeConverter=TypeConverters.toInt)
deletesThreshold = Param(Params._dummy(),
"deletesThreshold",
"minimum frequency of corrections a word needs to have to be considered from training." +
"Increase if training set is LARGE. Defaults to 0",
typeConverter=TypeConverters.toInt)
dupsLimit = Param(Params._dummy(),
"dupsLimit",
"maximum duplicate of characters in a word to consider. Defaults to 2",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self):
super(SymmetricDeleteApproach, self).__init__(
classname="com.johnsnowlabs.nlp.annotators.spell.symmetric.SymmetricDeleteApproach")
self._setDefault(maxEditDistance=3, frequencyThreshold=0, deletesThreshold=0, dupsLimit=2)
self.dictionary_path = ""
def setDictionary(self, path, token_pattern="\S+", read_as=ReadAs.TEXT, options={"format": "text"}):
"""Sets folder or file with text that teaches about the language.
Parameters
----------
path : str
Path to the resource
token_pattern : str, optional
            Regex pattern to extract tokens, by default "\S+"
read_as : str, optional
How to read the resource, by default ReadAs.TEXT
options : dict, optional
Options for reading the resource, by default {"format": "text"}
"""
self.dictionary_path = path
opts = options.copy()
if "tokenPattern" not in opts:
opts["tokenPattern"] = token_pattern
return self._set(dictionary=ExternalResource(path, read_as, opts))
def setMaxEditDistance(self, v):
"""Sets max edit distance characters to derive strings from a word, by
default 3.
Parameters
----------
v : int
Max edit distance characters to derive strings from a word
"""
return self._set(maxEditDistance=v)
def setFrequencyThreshold(self, v):
"""Sets minimum frequency of words to be considered from training, by
default 0.
Parameters
----------
v : int
Minimum frequency of words to be considered from training
"""
return self._set(frequencyThreshold=v)
def setDeletesThreshold(self, v):
"""Sets minimum frequency of corrections a word needs to have to be
considered from training, by default 0.
Parameters
----------
v : int
Minimum frequency of corrections a word needs to have to be
considered from training
"""
return self._set(deletesThreshold=v)
def _create_model(self, java_model):
return SymmetricDeleteModel(java_model=java_model)
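# Illustrative sketch of the idea behind Symmetric Delete (an aid for readers,
# not part of the Spark NLP API): both dictionary entries and the input word
# are expanded into their deletion variants, so candidate lookup needs only
# deletes rather than inserts, replaces and transposes.
def _delete_variants(word, max_edit_distance=1):
    """Return ``word`` plus every string reachable by deleting up to
    ``max_edit_distance`` characters, e.g. _delete_variants("wordz", 1) gives
    {'wordz', 'ordz', 'wrdz', 'wodz', 'worz', 'word'}."""
    variants = {word}
    for _ in range(max_edit_distance):
        variants |= {w[:i] + w[i + 1:] for w in variants for i in range(len(w))}
    return variants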
class SymmetricDeleteModel(AnnotatorModel):
"""Symmetric Delete spelling correction algorithm.
The Symmetric Delete spelling correction algorithm reduces the complexity of
edit candidate generation and dictionary lookup for a given
Damerau-Levenshtein distance. It is six orders of magnitude faster (than the
standard approach with deletes + transposes + replaces + inserts) and
language independent.
Pretrained models can be loaded with :meth:`.pretrained` of the companion
object:
>>> spell = SymmetricDeleteModel.pretrained() \\
... .setInputCols(["token"]) \\
... .setOutputCol("spell")
The default model is ``"spellcheck_sd"``, if no name is provided. For
available pretrained models please see the `Models Hub
<https://nlp.johnsnowlabs.com/models?task=Spell+Check>`__.
====================== ======================
Input Annotation types Output Annotation type
====================== ======================
``TOKEN`` ``TOKEN``
====================== ======================
Parameters
----------
None
References
----------
Inspired by `SymSpell <https://github.com/wolfgarbe/SymSpell>`__.
Examples
--------
>>> import sparknlp
>>> from sparknlp.base import *
>>> from sparknlp.annotator import *
>>> from pyspark.ml import Pipeline
>>> documentAssembler = DocumentAssembler() \\
... .setInputCol("text") \\
... .setOutputCol("document")
>>> tokenizer = Tokenizer() \\
... .setInputCols(["document"]) \\
... .setOutputCol("token")
>>> spellChecker = SymmetricDeleteModel.pretrained() \\
... .setInputCols(["token"]) \\
... .setOutputCol("spell")
>>> pipeline = Pipeline().setStages([
... documentAssembler,
... tokenizer,
... spellChecker
... ])
>>> data = spark.createDataFrame([["spmetimes i wrrite wordz erong."]]).toDF("text")
>>> result = pipeline.fit(data).transform(data)
>>> result.select("spell.result").show(truncate=False)
+--------------------------------------+
|result |
+--------------------------------------+
|[sometimes, i, write, words, wrong, .]|
+--------------------------------------+
See Also
--------
NorvigSweetingModel : for an alternative approach to spell checking
ContextSpellCheckerModel : for a DL based approach
"""
name = "SymmetricDeleteModel"
inputAnnotatorTypes = [AnnotatorType.TOKEN]
def __init__(self, classname="com.johnsnowlabs.nlp.annotators.spell.symmetric.SymmetricDeleteModel",
java_model=None):
super(SymmetricDeleteModel, self).__init__(
classname=classname,
java_model=java_model
)
@staticmethod
def pretrained(name="spellcheck_sd", lang="en", remote_loc=None):
"""Downloads and loads a pretrained model.
Parameters
----------
name : str, optional
Name of the pretrained model, by default "spellcheck_sd"
lang : str, optional
Language of the pretrained model, by default "en"
remote_loc : str, optional
Optional remote address of the resource, by default None. Will use
Spark NLPs repositories otherwise.
Returns
-------
SymmetricDeleteModel
The restored model
"""
from sparknlp.pretrained import ResourceDownloader
return ResourceDownloader.downloadModel(SymmetricDeleteModel, name, lang, remote_loc)
|
{
"content_hash": "4d38d82fef59cb4da2d778b15072dc06",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 118,
"avg_line_length": 36.879858657243815,
"alnum_prop": 0.5873335249592795,
"repo_name": "JohnSnowLabs/spark-nlp",
"id": "8ec2d74081188fe5b24217065943e04491ad0253",
"size": "11029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/sparknlp/annotator/spell_check/symmetric_delete.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "14452"
},
{
"name": "Java",
"bytes": "223289"
},
{
"name": "Makefile",
"bytes": "819"
},
{
"name": "Python",
"bytes": "1694517"
},
{
"name": "Scala",
"bytes": "4116435"
},
{
"name": "Shell",
"bytes": "5286"
}
],
"symlink_target": ""
}
|
import env
import mongoengine
from openrunlog import models
def rebuild_indexes():
collection_user = models.User._get_collection()
collection_run = models.Run._get_collection()
collection_user.update({}, {"$unset": {"_types": 1}}, multi=True)
collection_run.update({}, {"$unset": {"_types": 1}}, multi=True)
# Confirm extra data is removed
count = collection_user.find({'_types': {"$exists": True}}).count()
assert count == 0
count = collection_run.find({'_types': {"$exists": True}}).count()
assert count == 0
# Remove indexes
info = collection_user.index_information()
indexes_to_drop = [key for key, value in info.iteritems()
if '_types' in dict(value['key'])]
for index in indexes_to_drop:
collection_user.drop_index(index)
info = collection_run.index_information()
indexes_to_drop = [key for key, value in info.iteritems()
if '_types' in dict(value['key'])]
for index in indexes_to_drop:
collection_run.drop_index(index)
# Recreate indexes
models.User.ensure_indexes()
models.Run.ensure_indexes()
def fix_reference_fields():
for r in models.Run.objects:
r._mark_as_changed('user')
r.save()
for w in models.Week.objects:
w._mark_as_changed('user')
w.save()
for g in models.Group.objects:
g._mark_as_changed('admins')
g._mark_as_changed('members')
g.save()
def flush_redis_keys():
import redis
r = redis.StrictRedis(host='localhost', port=6379)
r.flushdb()
if __name__ == '__main__':
config = env.prefix('ORL_')
if config['debug'] == 'True':
config['debug'] = True
else:
config['debug'] = False
mongoengine.connect(
config['db_name'],
host=config['db_uri'])
models.User.ensure_indexes()
models.Run.ensure_indexes()
rebuild_indexes()
fix_reference_fields()
flush_redis_keys()
|
{
"content_hash": "c028aeb21ddc7c2b4f9fa52359159382",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 71,
"avg_line_length": 26.44,
"alnum_prop": 0.6016137165910237,
"repo_name": "JsonChiu/openrunlog",
"id": "4cf502460f4c6349153ec2f0668671ba4d67908f",
"size": "1983",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openrunlog/scripts/migrations/mongoengine07to08.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "749"
},
{
"name": "HTML",
"bytes": "38395"
},
{
"name": "JavaScript",
"bytes": "71398"
},
{
"name": "Nginx",
"bytes": "1743"
},
{
"name": "Python",
"bytes": "99245"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
}
|
import datetime
import random
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import timeutils
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.common import constants
from neutron.common import utils
from neutron import context as ncontext
from neutron.db import agents_db
from neutron.db import model_base
from neutron.extensions import agent as ext_agent
from neutron.extensions import dhcpagentscheduler
from neutron.i18n import _LE, _LI, _LW
LOG = logging.getLogger(__name__)
AGENTS_SCHEDULER_OPTS = [
cfg.StrOpt('network_scheduler_driver',
default='neutron.scheduler.'
'dhcp_agent_scheduler.WeightScheduler',
help=_('Driver to use for scheduling network to DHCP agent')),
cfg.BoolOpt('network_auto_schedule', default=True,
help=_('Allow auto scheduling networks to DHCP agent.')),
cfg.BoolOpt('allow_automatic_dhcp_failover', default=True,
help=_('Automatically remove networks from offline DHCP '
'agents.')),
cfg.IntOpt('dhcp_agents_per_network', default=1,
help=_('Number of DHCP agents scheduled to host a tenant '
'network. If this number is greater than 1, the '
'scheduler automatically assigns multiple DHCP agents '
'for a given tenant network, providing high '
'availability for DHCP service.')),
cfg.BoolOpt('enable_services_on_agents_with_admin_state_down',
default=False,
help=_('Enable services on an agent with admin_state_up '
'False. If this option is False, when admin_state_up '
'of an agent is turned False, services on it will be '
'disabled. Agents with admin_state_up False are not '
'selected for automatic scheduling regardless of this '
'option. But manual scheduling to such agents is '
'available if this option is True.')),
]
cfg.CONF.register_opts(AGENTS_SCHEDULER_OPTS)
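# Illustrative neutron.conf excerpt (values are hypothetical) matching the
# options registered above:
#
#     [DEFAULT]
#     network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
#     network_auto_schedule = True
#     allow_automatic_dhcp_failover = True
#     dhcp_agents_per_network = 2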
class NetworkDhcpAgentBinding(model_base.BASEV2):
"""Represents binding between neutron networks and DHCP agents."""
network_id = sa.Column(sa.String(36),
sa.ForeignKey("networks.id", ondelete='CASCADE'),
primary_key=True)
dhcp_agent = orm.relation(agents_db.Agent)
dhcp_agent_id = sa.Column(sa.String(36),
sa.ForeignKey("agents.id",
ondelete='CASCADE'),
primary_key=True)
class AgentSchedulerDbMixin(agents_db.AgentDbMixin):
"""Common class for agent scheduler mixins."""
# agent notifiers to handle agent update operations;
# should be updated by plugins;
agent_notifiers = {
constants.AGENT_TYPE_DHCP: None,
constants.AGENT_TYPE_L3: None,
constants.AGENT_TYPE_LOADBALANCER: None,
}
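    # Illustrative wiring (the notifier class is an assumption about typical
    # plugin code, not defined in this module): a plugin usually fills this
    # mapping in its constructor, e.g.
    #
    #     self.agent_notifiers[constants.AGENT_TYPE_DHCP] = (
    #         dhcp_rpc_agent_api.DhcpAgentNotifyAPI())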
@staticmethod
def is_eligible_agent(active, agent):
if active is None:
# filtering by activeness is disabled, all agents are eligible
return True
else:
# note(rpodolyaka): original behaviour is saved here: if active
# filter is set, only agents which are 'up'
# (i.e. have a recent heartbeat timestamp)
# are eligible, even if active is False
return not agents_db.AgentDbMixin.is_agent_down(
agent['heartbeat_timestamp'])
def update_agent(self, context, id, agent):
original_agent = self.get_agent(context, id)
result = super(AgentSchedulerDbMixin, self).update_agent(
context, id, agent)
agent_data = agent['agent']
agent_notifier = self.agent_notifiers.get(original_agent['agent_type'])
if (agent_notifier and
'admin_state_up' in agent_data and
original_agent['admin_state_up'] != agent_data['admin_state_up']):
agent_notifier.agent_updated(context,
agent_data['admin_state_up'],
original_agent['host'])
return result
def add_agent_status_check(self, function):
loop = loopingcall.FixedIntervalLoopingCall(function)
# TODO(enikanorov): make interval configurable rather than computed
interval = max(cfg.CONF.agent_down_time // 2, 1)
# add random initial delay to allow agents to check in after the
# neutron server first starts. random to offset multiple servers
initial_delay = random.randint(interval, interval * 2)
loop.start(interval=interval, initial_delay=initial_delay)
if hasattr(self, 'periodic_agent_loops'):
self.periodic_agent_loops.append(loop)
else:
self.periodic_agent_loops = [loop]
def agent_dead_limit_seconds(self):
return cfg.CONF.agent_down_time * 2
def wait_down_agents(self, agent_type, agent_dead_limit):
"""Gives chance for agents to send a heartbeat."""
# check for an abrupt clock change since last check. if a change is
# detected, sleep for a while to let the agents check in.
tdelta = timeutils.utcnow() - getattr(self, '_clock_jump_canary',
timeutils.utcnow())
if timeutils.total_seconds(tdelta) > cfg.CONF.agent_down_time:
LOG.warn(_LW("Time since last %s agent reschedule check has "
"exceeded the interval between checks. Waiting "
"before check to allow agents to send a heartbeat "
"in case there was a clock adjustment."), agent_type)
time.sleep(agent_dead_limit)
self._clock_jump_canary = timeutils.utcnow()
def get_cutoff_time(self, agent_dead_limit):
cutoff = timeutils.utcnow() - datetime.timedelta(
seconds=agent_dead_limit)
return cutoff
class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
.DhcpAgentSchedulerPluginBase,
AgentSchedulerDbMixin):
"""Mixin class to add DHCP agent scheduler extension to db_base_plugin_v2.
"""
network_scheduler = None
def start_periodic_dhcp_agent_status_check(self):
if not cfg.CONF.allow_automatic_dhcp_failover:
LOG.info(_LI("Skipping periodic DHCP agent status check because "
"automatic network rescheduling is disabled."))
return
self.add_agent_status_check(self.remove_networks_from_down_agents)
def is_eligible_agent(self, context, active, agent):
# eligible agent is active or starting up
return (AgentSchedulerDbMixin.is_eligible_agent(active, agent) or
self.agent_starting_up(context, agent))
def agent_starting_up(self, context, agent):
"""Check if agent was just started.
        Method returns True if the agent is in its 'starting up' period.
        The length of that period depends on the number of networks
        assigned to the agent. It doesn't look at the latest heartbeat
        timestamp, as it is assumed that this method is called for agents
        that are considered dead.
"""
agent_dead_limit = datetime.timedelta(
seconds=self.agent_dead_limit_seconds())
network_count = (context.session.query(NetworkDhcpAgentBinding).
filter_by(dhcp_agent_id=agent['id']).count())
        # The number of networks assigned to the agent affects the amount
        # of time we give it to start up. Tests show that it's more or less
        # safe to assume that the DHCP agent processes each network in less
        # than 2 seconds. So, give it this additional time for each of the
        # networks.
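        # Worked example (illustrative values): with agent_down_time = 75,
        # agent_dead_limit is 150 seconds, so an agent hosting 30 networks
        # gets 150 + 2 * 30 = 210 seconds from 'started_at' before it is
        # considered fully started.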
additional_time = datetime.timedelta(seconds=2 * network_count)
LOG.debug("Checking if agent starts up and giving it additional %s",
additional_time)
agent_expected_up = (agent['started_at'] + agent_dead_limit +
additional_time)
return agent_expected_up > timeutils.utcnow()
def _schedule_network(self, context, network_id, dhcp_notifier):
LOG.info(_LI("Scheduling unhosted network %s"), network_id)
try:
# TODO(enikanorov): have to issue redundant db query
# to satisfy scheduling interface
network = self.get_network(context, network_id)
agents = self.schedule_network(context, network)
if not agents:
LOG.info(_LI("Failed to schedule network %s, "
"no eligible agents or it might be "
"already scheduled by another server"),
network_id)
return
if not dhcp_notifier:
return
for agent in agents:
LOG.info(_LI("Adding network %(net)s to agent "
"%(agent)s on host %(host)s"),
{'net': network_id,
'agent': agent.id,
'host': agent.host})
dhcp_notifier.network_added_to_agent(
context, network_id, agent.host)
except Exception:
            # Catch any exception during scheduling so that, when
            # _schedule_network is invoked in a loop, the loop can
            # continue regardless.
LOG.exception(_LE("Failed to schedule network %s"), network_id)
def _filter_bindings(self, context, bindings):
"""Skip bindings for which the agent is dead, but starting up."""
        # To save a few DB calls, store already-checked agents in a dict:
        # id -> is_agent_starting_up
checked_agents = {}
for binding in bindings:
try:
agent_id = binding.dhcp_agent['id']
if agent_id not in checked_agents:
if self.agent_starting_up(context, binding.dhcp_agent):
                        # When an agent starts and has many networks to
                        # process, it may fail to send state reports in the
                        # defined interval. The server would consider it
                        # dead and try to remove networks from it.
checked_agents[agent_id] = True
LOG.debug("Agent %s is starting up, skipping",
agent_id)
else:
checked_agents[agent_id] = False
if not checked_agents[agent_id]:
yield binding
except exc.ObjectDeletedError:
                # we're not within a transaction, so the object can be lost
                # because the underlying row was removed; just ignore this
LOG.debug("binding was removed concurrently, skipping it")
def remove_networks_from_down_agents(self):
"""Remove networks from down DHCP agents if admin state is up.
Reschedule them if configured so.
"""
agent_dead_limit = self.agent_dead_limit_seconds()
self.wait_down_agents('DHCP', agent_dead_limit)
cutoff = self.get_cutoff_time(agent_dead_limit)
context = ncontext.get_admin_context()
down_bindings = (
context.session.query(NetworkDhcpAgentBinding).
join(agents_db.Agent).
filter(agents_db.Agent.heartbeat_timestamp < cutoff,
agents_db.Agent.admin_state_up))
dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
try:
dead_bindings = [b for b in
self._filter_bindings(context, down_bindings)]
agents = self.get_agents_db(
context, {'agent_type': [constants.AGENT_TYPE_DHCP]})
active_agents = [agent for agent in agents if
self.is_eligible_agent(context, True, agent)]
if not active_agents:
LOG.warn(_LW("No DHCP agents available, "
"skipping rescheduling"))
return
for binding in dead_bindings:
LOG.warn(_LW("Removing network %(network)s from agent "
"%(agent)s because the agent did not report "
"to the server in the last %(dead_time)s "
"seconds."),
{'network': binding.network_id,
'agent': binding.dhcp_agent_id,
'dead_time': agent_dead_limit})
# save binding object to avoid ObjectDeletedError
# in case binding is concurrently deleted from the DB
saved_binding = {'net': binding.network_id,
'agent': binding.dhcp_agent_id}
try:
                    # do not notify the agent if it is considered dead, so
                    # that when it is restarted it won't see network delete
                    # notifications on its queue
self.remove_network_from_dhcp_agent(context,
binding.dhcp_agent_id,
binding.network_id,
notify=False)
except dhcpagentscheduler.NetworkNotHostedByDhcpAgent:
# measures against concurrent operation
LOG.debug("Network %(net)s already removed from DHCP "
"agent %(agent)s",
saved_binding)
# still continue and allow concurrent scheduling attempt
except Exception:
LOG.exception(_LE("Unexpected exception occurred while "
"removing network %(net)s from agent "
"%(agent)s"),
saved_binding)
if cfg.CONF.network_auto_schedule:
self._schedule_network(
context, saved_binding['net'], dhcp_notifier)
except Exception:
# we want to be thorough and catch whatever is raised
# to avoid loop abortion
LOG.exception(_LE("Exception encountered during network "
"rescheduling"))
def get_dhcp_agents_hosting_networks(
self, context, network_ids, active=None, admin_state_up=None):
if not network_ids:
return []
query = context.session.query(NetworkDhcpAgentBinding)
query = query.options(orm.contains_eager(
NetworkDhcpAgentBinding.dhcp_agent))
query = query.join(NetworkDhcpAgentBinding.dhcp_agent)
if len(network_ids) == 1:
query = query.filter(
NetworkDhcpAgentBinding.network_id == network_ids[0])
        elif network_ids:
            # Python's 'in' does not build a SQL IN clause for a column;
            # use the SQLAlchemy in_() operator instead.
            query = query.filter(
                NetworkDhcpAgentBinding.network_id.in_(network_ids))
if admin_state_up is not None:
query = query.filter(agents_db.Agent.admin_state_up ==
admin_state_up)
return [binding.dhcp_agent
for binding in query
if self.is_eligible_agent(context, active,
binding.dhcp_agent)]
def add_network_to_dhcp_agent(self, context, id, network_id):
self._get_network(context, network_id)
with context.session.begin(subtransactions=True):
agent_db = self._get_agent(context, id)
if (agent_db['agent_type'] != constants.AGENT_TYPE_DHCP or
not services_available(agent_db['admin_state_up'])):
raise dhcpagentscheduler.InvalidDHCPAgent(id=id)
dhcp_agents = self.get_dhcp_agents_hosting_networks(
context, [network_id])
for dhcp_agent in dhcp_agents:
if id == dhcp_agent.id:
raise dhcpagentscheduler.NetworkHostedByDHCPAgent(
network_id=network_id, agent_id=id)
binding = NetworkDhcpAgentBinding()
binding.dhcp_agent_id = id
binding.network_id = network_id
context.session.add(binding)
dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
if dhcp_notifier:
dhcp_notifier.network_added_to_agent(
context, network_id, agent_db.host)
def remove_network_from_dhcp_agent(self, context, id, network_id,
notify=True):
agent = self._get_agent(context, id)
with context.session.begin(subtransactions=True):
try:
query = context.session.query(NetworkDhcpAgentBinding)
query = query.filter(
NetworkDhcpAgentBinding.network_id == network_id,
NetworkDhcpAgentBinding.dhcp_agent_id == id)
# just ensure the binding exists
query.one()
except exc.NoResultFound:
raise dhcpagentscheduler.NetworkNotHostedByDhcpAgent(
network_id=network_id, agent_id=id)
# reserve the port, so the ip is reused on a subsequent add
device_id = utils.get_dhcp_agent_device_id(network_id,
agent['host'])
filters = dict(device_id=[device_id])
ports = self.get_ports(context, filters=filters)
for port in ports:
port['device_id'] = constants.DEVICE_ID_RESERVED_DHCP_PORT
self.update_port(context, port['id'], dict(port=port))
# avoid issues with query.one() object that was
# loaded into the session
query.delete(synchronize_session=False)
if not notify:
return
dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
if dhcp_notifier:
dhcp_notifier.network_removed_from_agent(
context, network_id, agent.host)
def list_networks_on_dhcp_agent(self, context, id):
query = context.session.query(NetworkDhcpAgentBinding.network_id)
query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == id)
net_ids = [item[0] for item in query]
if net_ids:
return {'networks':
self.get_networks(context, filters={'id': net_ids})}
else:
# Exception will be thrown if the requested agent does not exist.
self._get_agent(context, id)
return {'networks': []}
def list_active_networks_on_active_dhcp_agent(self, context, host):
try:
agent = self._get_agent_by_type_and_host(
context, constants.AGENT_TYPE_DHCP, host)
except ext_agent.AgentNotFoundByTypeHost:
LOG.debug("DHCP Agent not found on host %s", host)
return []
if not services_available(agent.admin_state_up):
return []
query = context.session.query(NetworkDhcpAgentBinding.network_id)
query = query.filter(NetworkDhcpAgentBinding.dhcp_agent_id == agent.id)
net_ids = [item[0] for item in query]
if net_ids:
return self.get_networks(
context,
filters={'id': net_ids, 'admin_state_up': [True]}
)
else:
return []
def list_dhcp_agents_hosting_network(self, context, network_id):
dhcp_agents = self.get_dhcp_agents_hosting_networks(
context, [network_id])
agent_ids = [dhcp_agent.id for dhcp_agent in dhcp_agents]
if agent_ids:
return {
'agents': self.get_agents(context, filters={'id': agent_ids})}
else:
return {'agents': []}
def schedule_network(self, context, created_network):
if self.network_scheduler:
return self.network_scheduler.schedule(
self, context, created_network)
def auto_schedule_networks(self, context, host):
if self.network_scheduler:
self.network_scheduler.auto_schedule_networks(self, context, host)
# helper functions for readability.
def services_available(admin_state_up):
if cfg.CONF.enable_services_on_agents_with_admin_state_down:
        # Services are available regardless of admin_state_up
return True
return admin_state_up
def get_admin_state_up_filter():
if cfg.CONF.enable_services_on_agents_with_admin_state_down:
# Avoid filtering on admin_state_up at all
return None
    # Filter on admin_state_up being True
return True
|
{
"content_hash": "5462ca667c7e0e3c22b70b3dac71c0c1",
"timestamp": "",
"source": "github",
"line_count": 460,
"max_line_length": 79,
"avg_line_length": 45.89565217391304,
"alnum_prop": 0.5714759378552482,
"repo_name": "chitr/neutron",
"id": "924cdb41699ba5f846ec3330b369640ab0cdc317",
"size": "21753",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neutron/db/agentschedulers_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7647002"
},
{
"name": "Shell",
"bytes": "13342"
}
],
"symlink_target": ""
}
|
"""
License: BSD
(c) 2009-2013 ::: www.CodeResort.com - BV Network AS (simon-code@bvnetwork.no)
"""
import unittest
import xmlrpclib
import os
import shutil
import datetime
import time
from tracrpc.xml_rpc import to_xmlrpc_datetime
from tracrpc.tests import rpc_testenv, TracRpcTestCase
class RpcTicketTestCase(TracRpcTestCase):
def setUp(self):
TracRpcTestCase.setUp(self)
self.anon = xmlrpclib.ServerProxy(rpc_testenv.url_anon)
self.user = xmlrpclib.ServerProxy(rpc_testenv.url_user)
self.admin = xmlrpclib.ServerProxy(rpc_testenv.url_admin)
def tearDown(self):
TracRpcTestCase.tearDown(self)
def test_create_get_delete(self):
tid = self.admin.ticket.create("create_get_delete", "fooy", {})
tid, time_created, time_changed, attributes = self.admin.ticket.get(tid)
self.assertEquals('fooy', attributes['description'])
self.assertEquals('create_get_delete', attributes['summary'])
self.assertEquals('new', attributes['status'])
self.assertEquals('admin', attributes['reporter'])
self.admin.ticket.delete(tid)
def test_getActions(self):
tid = self.admin.ticket.create("ticket_getActions", "kjsald", {})
actions = self.admin.ticket.getActions(tid)
default = [['leave', 'leave', '.', []], ['resolve', 'resolve',
"The resolution will be set. Next status will be 'closed'.",
[['action_resolve_resolve_resolution', 'fixed',
['fixed', 'invalid', 'wontfix', 'duplicate', 'worksforme']]]],
['reassign', 'reassign',
"The owner will change from (none). Next status will be 'assigned'.",
[['action_reassign_reassign_owner', 'admin', []]]],
['accept', 'accept',
"The owner will change from (none) to admin. Next status will be 'accepted'.", []]]
# Adjust for trac:changeset:9041
if 'will be changed' in actions[2][2]:
default[2][2] = default[2][2].replace('will change', 'will be changed')
default[3][2] = default[3][2].replace('will change', 'will be changed')
# Adjust for trac:changeset:11777
if not 'from (none).' in actions[2][2]:
default[2][2] = default[2][2].replace('from (none).',
'from (none) to the specified user.')
# Adjust for trac:changeset:11778
if actions[0][2] != '.':
default[0][2] = 'The ticket will remain with no owner.'
self.assertEquals(actions, default)
self.admin.ticket.delete(tid)
def test_getAvailableActions_DeleteTicket(self):
# http://trac-hacks.org/ticket/5387
tid = self.admin.ticket.create('abc', 'def', {})
self.assertEquals(False,
'delete' in self.admin.ticket.getAvailableActions(tid))
env = rpc_testenv.get_trac_environment()
delete_plugin = os.path.join(rpc_testenv.tracdir,
'plugins', 'DeleteTicket.py')
shutil.copy(os.path.join(
rpc_testenv.trac_src, 'sample-plugins', 'workflow', 'DeleteTicket.py'),
delete_plugin)
env.config.set('ticket', 'workflow',
'ConfigurableTicketWorkflow,DeleteTicketActionController')
env.config.save()
self.assertEquals(True,
'delete' in self.admin.ticket.getAvailableActions(tid))
self.assertEquals(False,
'delete' in self.user.ticket.getAvailableActions(tid))
env.config.set('ticket', 'workflow',
'ConfigurableTicketWorkflow')
env.config.save()
rpc_testenv.restart()
self.assertEquals(False,
'delete' in self.admin.ticket.getAvailableActions(tid))
# Clean up
os.unlink(delete_plugin)
rpc_testenv.restart()
self.assertEquals(0, self.admin.ticket.delete(tid))
def test_FineGrainedSecurity(self):
self.assertEquals(1, self.admin.ticket.create('abc', '123', {}))
self.assertEquals(2, self.admin.ticket.create('def', '456', {}))
# First some non-restricted tests for comparison:
self.assertRaises(xmlrpclib.Fault, self.anon.ticket.create, 'abc', 'def')
self.assertEquals([1,2], self.user.ticket.query())
self.assertTrue(self.user.ticket.get(2))
self.assertTrue(self.user.ticket.update(1, "ok"))
self.assertTrue(self.user.ticket.update(2, "ok"))
# Enable security policy and test
from trac.core import Component, implements
from trac.perm import IPermissionPolicy
policy = os.path.join(rpc_testenv.tracdir, 'plugins', 'TicketPolicy.py')
open(policy, 'w').write(
"from trac.core import *\n"
"from trac.perm import IPermissionPolicy\n"
"class TicketPolicy(Component):\n"
" implements(IPermissionPolicy)\n"
" def check_permission(self, action, username, resource, perm):\n"
" if username == 'user' and resource and resource.id == 2:\n"
" return False\n"
" if username == 'anonymous' and action == 'TICKET_CREATE':\n"
" return True\n")
env = rpc_testenv.get_trac_environment()
_old_conf = env.config.get('trac', 'permission_policies')
env.config.set('trac', 'permission_policies', 'TicketPolicy,'+_old_conf)
env.config.save()
rpc_testenv.restart()
self.assertEquals([1], self.user.ticket.query())
self.assertTrue(self.user.ticket.get(1))
self.assertRaises(xmlrpclib.Fault, self.user.ticket.get, 2)
self.assertTrue(self.user.ticket.update(1, "ok"))
self.assertRaises(xmlrpclib.Fault, self.user.ticket.update, 2, "not ok")
self.assertEquals(3, self.anon.ticket.create('efg', '789', {}))
# Clean, reset and simple verification
env.config.set('trac', 'permission_policies', _old_conf)
env.config.save()
os.unlink(policy)
rpc_testenv.restart()
self.assertEquals([1,2,3], self.user.ticket.query())
self.assertEquals(0, self.admin.ticket.delete(1))
self.assertEquals(0, self.admin.ticket.delete(2))
self.assertEquals(0, self.admin.ticket.delete(3))
def test_getRecentChanges(self):
tid1 = self.admin.ticket.create("ticket_getRecentChanges", "one", {})
time.sleep(1)
tid2 = self.admin.ticket.create("ticket_getRecentChanges", "two", {})
_id, created, modified, attributes = self.admin.ticket.get(tid2)
changes = self.admin.ticket.getRecentChanges(created)
try:
self.assertEquals(changes, [tid2])
finally:
self.admin.ticket.delete(tid1)
self.admin.ticket.delete(tid2)
def test_query_group_order_col(self):
t1 = self.admin.ticket.create("1", "",
{'type': 'enhancement', 'owner': 'A'})
t2 = self.admin.ticket.create("2", "", {'type': 'task', 'owner': 'B'})
t3 = self.admin.ticket.create("3", "", {'type': 'defect', 'owner': 'A'})
# order
self.assertEquals([3,1,2], self.admin.ticket.query("order=type"))
self.assertEquals([1,3,2], self.admin.ticket.query("order=owner"))
self.assertEquals([2,1,3],
self.admin.ticket.query("order=owner&desc=1"))
# group
self.assertEquals([1,3,2], self.admin.ticket.query("group=owner"))
self.assertEquals([2,1,3],
self.admin.ticket.query("group=owner&groupdesc=1"))
# group + order
self.assertEquals([2,3,1],
self.admin.ticket.query("group=owner&groupdesc=1&order=type"))
# col should just be ignored
self.assertEquals([3,1,2],
self.admin.ticket.query("order=type&col=status&col=reporter"))
# clean
self.assertEquals(0, self.admin.ticket.delete(t1))
self.assertEquals(0, self.admin.ticket.delete(t2))
self.assertEquals(0, self.admin.ticket.delete(t3))
def test_query_special_character_escape(self):
# Note: This test only passes when using Trac 0.12+
# See http://trac-hacks.org/ticket/7737
if __import__('trac').__version__ < '0.12':
self.fail("Known issue: Trac 0.11 does not handle escaped input properly.")
summary = ["here&now", "maybe|later", "back\slash"]
search = ["here\&now", "maybe\|later", "back\\slash"]
tids = []
for s in summary:
tids.append(self.admin.ticket.create(s,
"test_special_character_escape", {}))
try:
for i in range(0, 3):
self.assertEquals([tids[i]],
self.admin.ticket.query("summary=%s" % search[i]))
self.assertEquals(tids.sort(),
self.admin.ticket.query("summary=%s" % "|".join(search)).sort())
finally:
for tid in tids:
self.admin.ticket.delete(tid)
def test_update_author(self):
tid = self.admin.ticket.create("ticket_update_author", "one", {})
self.admin.ticket.update(tid, 'comment1', {})
time.sleep(1)
self.admin.ticket.update(tid, 'comment2', {}, False, 'foo')
time.sleep(1)
self.user.ticket.update(tid, 'comment3', {}, False, 'should_be_rejected')
changes = self.admin.ticket.changeLog(tid)
self.assertEquals(3, len(changes))
for when, who, what, cnum, comment, _tid in changes:
self.assertTrue(comment in ('comment1', 'comment2', 'comment3'))
if comment == 'comment1':
self.assertEquals('admin', who)
if comment == 'comment2':
self.assertEquals('foo', who)
if comment == 'comment3':
self.assertEquals('user', who)
self.admin.ticket.delete(tid)
def test_create_at_time(self):
from tracrpc.util import to_datetime, utc
now = to_datetime(None, utc)
minus1 = to_xmlrpc_datetime(now - datetime.timedelta(days=1))
# create the tickets (user ticket will not be permitted to change time)
one = self.admin.ticket.create("create_at_time1", "ok", {}, False,
minus1)
two = self.user.ticket.create("create_at_time3", "ok", {}, False,
minus1)
# get the tickets
t1 = self.admin.ticket.get(one)
t2 = self.admin.ticket.get(two)
# check timestamps
self.assertTrue(t1[1] < t2[1])
self.admin.ticket.delete(one)
self.admin.ticket.delete(two)
def test_update_at_time(self):
from tracrpc.util import to_datetime, utc
now = to_datetime(None, utc)
minus1 = to_xmlrpc_datetime(now - datetime.timedelta(hours=1))
minus2 = to_xmlrpc_datetime(now - datetime.timedelta(hours=2))
tid = self.admin.ticket.create("ticket_update_at_time", "ok", {})
self.admin.ticket.update(tid, 'one', {}, False, '', minus2)
self.admin.ticket.update(tid, 'two', {}, False, '', minus1)
self.user.ticket.update(tid, 'three', {}, False, '', minus1)
time.sleep(1)
self.user.ticket.update(tid, 'four', {})
changes = self.admin.ticket.changeLog(tid)
self.assertEquals(4, len(changes))
        # quick test to make sure each change is newer than the previous one
self.assertTrue(changes[0][0] < changes[1][0] < changes[2][0])
        # allow a 1 second margin for tests
justnow = to_xmlrpc_datetime(now - datetime.timedelta(seconds=1))
self.assertTrue(justnow <= changes[2][0])
self.assertTrue(justnow <= changes[3][0])
self.admin.ticket.delete(tid)
def test_update_non_existing(self):
try:
self.admin.ticket.update(3344, "a comment", {})
self.fail("Allowed to update non-existing ticket???")
self.admin.ticket.delete(3234)
except Exception, e:
self.assertTrue("Ticket 3344 does not exist." in str(e))
def test_update_basic(self):
import time
# Basic update check, no 'action' or 'time_changed'
tid = self.admin.ticket.create('test_update_basic1', 'ieidnsj', {
'owner': 'osimons'})
# old-style (deprecated)
self.admin.ticket.update(tid, "comment1", {'component': 'component2'})
self.assertEquals(2, len(self.admin.ticket.changeLog(tid)))
# new-style with 'action'
time.sleep(1) # avoid "columns ticket, time, field are not unique"
self.admin.ticket.update(tid, "comment2", {'component': 'component1',
'action': 'leave'})
self.assertEquals(4, len(self.admin.ticket.changeLog(tid)))
self.admin.ticket.delete(tid)
def test_update_time_changed(self):
# Update with collision check
import datetime
from tracrpc.util import to_utimestamp
from tracrpc.xml_rpc import from_xmlrpc_datetime
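        # The '_ts' attribute carries the last-seen change timestamp;
        # posting a stale value below must make the server reject the
        # update.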
tid = self.admin.ticket.create('test_update_time_changed', '...', {})
tid, created, modified, attrs = self.admin.ticket.get(tid)
then = from_xmlrpc_datetime(modified) - datetime.timedelta(minutes=1)
# Unrestricted old-style update (to be removed soon)
try:
self.admin.ticket.update(tid, "comment1",
{'_ts': str(to_utimestamp(then))})
except Exception, e:
self.assertTrue("Ticket has been updated since last get" in str(e))
# Update with 'action' to test new-style update.
try:
self.admin.ticket.update(tid, "comment1",
{'_ts': str(to_utimestamp(then)),
'action': 'leave'})
except Exception, e:
self.assertTrue("modified by someone else" in str(e))
self.admin.ticket.delete(tid)
def test_update_time_same(self):
# Unrestricted old-style update (to be removed soon)
tid = self.admin.ticket.create('test_update_time_same', '...', {})
tid, created, modified, attrs = self.admin.ticket.get(tid)
ts = attrs['_ts']
self.admin.ticket.update(tid, "comment1",
{'_ts': ts})
self.admin.ticket.delete(tid)
# Update with 'action' to test new-style update.
tid = self.admin.ticket.create('test_update_time_same', '...', {})
tid, created, modified, attrs = self.admin.ticket.get(tid)
ts = attrs['_ts']
self.admin.ticket.update(tid, "comment1",
{'_ts': ts, 'action': 'leave'})
self.admin.ticket.delete(tid)
def test_update_action(self):
# Updating with 'action' in attributes
tid = self.admin.ticket.create('test_update_action', 'ss')
current = self.admin.ticket.get(tid)
self.assertEqual('', current[3].get('owner', ''))
updated = self.admin.ticket.update(tid, "comment1",
{'action': 'reassign',
'action_reassign_reassign_owner': 'user'})
self.assertEqual('user', updated[3].get('owner'))
self.admin.ticket.delete(tid)
def test_update_action_non_existing(self):
# Updating with non-existing 'action' in attributes
tid = self.admin.ticket.create('test_update_action_wrong', 'ss')
try:
self.admin.ticket.update(tid, "comment1",
{'action': 'reassign',
'action_reassign_reassign_owner': 'user'})
except Exception, e:
self.assertTrue("invalid action" in str(e))
self.admin.ticket.delete(tid)
def test_update_field_non_existing(self):
tid = self.admin.ticket.create('test_update_field_non_existing', 'yw3')
try:
self.admin.ticket.update(tid, "comment1",
{'does_not_exist': 'eiwrjoer'})
except Exception, e:
self.assertTrue("no such column" in str(e))
self.admin.ticket.delete(tid)
def test_create_ticket_9096(self):
# See http://trac-hacks.org/ticket/9096
import urllib2, base64
body = """<?xml version="1.0"?>
<methodCall>
<methodName>ticket.create</methodName>
<params>
<param><string>test summary</string></param>
<param><string>test desc</string></param>
</params>
</methodCall>"""
request = urllib2.Request(rpc_testenv.url + '/login/rpc', data=body)
request.add_header('Content-Type', 'application/xml')
request.add_header('Content-Length', str(len(body)))
request.add_header('Authorization', 'Basic %s' \
% base64.encodestring('admin:admin')[:-1])
self.assertEquals('POST', request.get_method())
response = urllib2.urlopen(request)
self.assertEquals(200, response.code)
self.assertEquals("<?xml version='1.0'?>\n"
"<methodResponse>\n"
"<params>\n"
"<param>\n"
"<value><int>1</int></value>\n"
"</param>\n"
"</params>\n"
"</methodResponse>\n", response.read())
self.admin.ticket.delete(1)
class RpcTicketVersionTestCase(TracRpcTestCase):
def setUp(self):
TracRpcTestCase.setUp(self)
self.anon = xmlrpclib.ServerProxy(rpc_testenv.url_anon)
self.user = xmlrpclib.ServerProxy(rpc_testenv.url_user)
self.admin = xmlrpclib.ServerProxy(rpc_testenv.url_admin)
def tearDown(self):
TracRpcTestCase.tearDown(self)
def test_create(self):
from tracrpc.util import to_datetime, utc
dt = to_xmlrpc_datetime(to_datetime(None, utc))
desc = "test version"
v = self.admin.ticket.version.create('9.99',
{'time': dt, 'description': desc})
self.failUnless('9.99' in self.admin.ticket.version.getAll())
self.assertEquals({'time': dt, 'description': desc, 'name': '9.99'},
self.admin.ticket.version.get('9.99'))
def test_suite():
test_suite = unittest.TestSuite()
test_suite.addTest(unittest.makeSuite(RpcTicketTestCase))
test_suite.addTest(unittest.makeSuite(RpcTicketVersionTestCase))
return test_suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
{
"content_hash": "9997c35fa05e9c22d60171c164e23bc6",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 101,
"avg_line_length": 46.20099255583126,
"alnum_prop": 0.5822009774961061,
"repo_name": "hexenxp14/tracxmlrpc",
"id": "60b4d64d0b777ee1f465c098ae3b4bd2faf915ba",
"size": "18643",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tracrpc/tests/ticket.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "201"
},
{
"name": "JavaScript",
"bytes": "1096"
},
{
"name": "Python",
"bytes": "133198"
}
],
"symlink_target": ""
}
|
import stringmanipulation
import filemanagement
import sys
extensions = ['.h','.cc','.c','.cpp']
ignore_these = ['my_ignore_header.h']
if((len(sys.argv) != 2) and (len(sys.argv) != 3)):
print 'parameters are: directory [--commit]'
quit()
directory = sys.argv[1];
if(not filemanagement.pathexist(directory)):
print 'path ' + directory + ' does not exist'
quit()
if((len(sys.argv) == 3) and (sys.argv[2] != '--commit')):
    print 'parameters are: directory [--commit]'
quit()
commit = False
if(len(sys.argv) == 3):
commit = True
files_to_fix = []
for extension in extensions:
files_to_fix.extend(filemanagement.listallfilesinfolder(directory,\
extension))
# Just steal the header from the template
def fileheaderasstring():
template_file_name = 'license_template.txt'
if (not filemanagement.fileexist(template_file_name)):
print 'File ' + template_file_name + ' not found!'
quit()
template_file = open(template_file_name,'r')
return_string = ''
for line in template_file:
return_string += line
return return_string
# Just steal the header from the template
def fileheaderasarray():
template_file_name = 'license_template.txt'
if (not filemanagement.fileexist(template_file_name)):
print 'File ' + template_file_name + ' not found!'
quit()
template_file = open(template_file_name,'r')
return_value = []
for line in template_file:
return_value.append(line)
return return_value
def findheader(path, file_name):
full_file_name = path + file_name
if (not filemanagement.fileexist(full_file_name)):
print 'File ' + file_name + ' not found!'
print 'Unexpected error!'
quit()
file_handle = open(full_file_name)
template_file_content = fileheaderasarray()
compare_content = []
# load the same number of lines from file as the fileheader
for index in range(len(template_file_content)):
line = file_handle.readline()
if (line == ''):
return False
compare_content.append(line)
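    # Slide a window of len(template) lines through the file: if the window
    # ever matches the template line-for-line, the header is present.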
while (True):
found = True
for index in range(len(template_file_content)):
line1 = template_file_content[index]
line2 = compare_content[index]
if(line1 != line2):
found = False
break
if (found):
return True
compare_content = compare_content[1:len(compare_content)]
line = file_handle.readline()
if (line == ''):
return False
compare_content.append(line)
return False
# Used to store temporary result before flushing to real file when finished
def temporaryfilename(old_file_name):
return old_file_name + '.deleteme'
def updatefile(path, old_file_name):
full_old_file_name = path + old_file_name
if (not filemanagement.fileexist(full_old_file_name)):
print 'File ' + full_old_file_name + ' is not found.'
print 'Should not happen! Ever!'
quit()
full_temporary_file_name = path + temporaryfilename(old_file_name)
# Make sure that the files are closed by putting them out of scope
old_file = open(full_old_file_name,'r')
temporary_file = open(full_temporary_file_name,'w')
temporary_file.writelines(fileheaderasstring())
remove_whitespaces = True
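    # Drop leading blank lines from the original file so the freshly written
    # header is not followed by stray whitespace; all remaining content is
    # kept unchanged below the new header.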
for line in old_file:
if (remove_whitespaces and (len(line.split()) == 0)):
continue
else:
remove_whitespaces = False
temporary_file.writelines(line)
old_file.close()
temporary_file.close()
filemanagement.copyfile(full_old_file_name,full_temporary_file_name)
filemanagement.deletefile(full_temporary_file_name)
failed_files = []
skipped_files = []
for index in range(len(files_to_fix)):
if(commit):
print (100*index)/len(files_to_fix)
path_dir = files_to_fix[index][0]
filename = files_to_fix[index][1]
is_ignore = False
for ignore_names in ignore_these:
if(filename == ignore_names):
is_ignore = True
break
if(is_ignore):
continue
    # Use the word 'copyright' as a sanity check: if the file already
    # mentions copyright it is either skipped (the header matches the
    # template) or reported as failed, and no change is made.
if(filemanagement.findstringinfile(path_dir,filename,'Copyright') or
filemanagement.findstringinfile(path_dir,filename,'copyright') or
filemanagement.findstringinfile(path_dir,filename,'COPYRIGHT')):
if(findheader(path_dir,filename)):
skipped_files.append(path_dir + filename)
else:
failed_files.append(path_dir + filename)
continue
if (not commit):
print 'File ' + path_dir + filename + ' will be updated'
continue
updatefile(path_dir,filename)
tense = 'will be'
if (commit):
tense = 'has been'
if (len(skipped_files) > 0):
print str(len(skipped_files)) + ' file(s) ' + tense + ' skipped since they already have the correct header'
if (len(failed_files) > 0):
print 'Following files seem to have an invalid file header:'
for line in failed_files:
print line
|
{
"content_hash": "a92802a19d48b72cf84a4f0576bdf9ef",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 111,
"avg_line_length": 32.422360248447205,
"alnum_prop": 0.6346743295019157,
"repo_name": "stoiczek/WebRTC",
"id": "01c8a8b4e184e2132185d032d25c55174fe3f639",
"size": "5243",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tools/refactoring/addfileheader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "7932"
},
{
"name": "C",
"bytes": "5240930"
},
{
"name": "C++",
"bytes": "11114431"
},
{
"name": "Java",
"bytes": "148890"
},
{
"name": "Objective-C",
"bytes": "232287"
},
{
"name": "Python",
"bytes": "4221"
}
],
"symlink_target": ""
}
|
import operator
from numpy.testing import assert_raises
import numpy as np
import pytest
from .. import ones, asarray, reshape, result_type, all, equal
from .._array_object import Array
from .._dtypes import (
_all_dtypes,
_boolean_dtypes,
_floating_dtypes,
_integer_dtypes,
_integer_or_boolean_dtypes,
_numeric_dtypes,
int8,
int16,
int32,
int64,
uint64,
bool as bool_,
)
def test_validate_index():
# The indexing tests in the official array API test suite test that the
# array object correctly handles the subset of indices that are required
# by the spec. But the NumPy array API implementation specifically
# disallows any index not required by the spec, via Array._validate_index.
# This test focuses on testing that non-valid indices are correctly
# rejected. See
# https://data-apis.org/array-api/latest/API_specification/indexing.html
# and the docstring of Array._validate_index for the exact indexing
# behavior that should be allowed. This does not test indices that are
# already invalid in NumPy itself because Array will generally just pass
# such indices directly to the underlying np.ndarray.
a = ones((3, 4))
# Out of bounds slices are not allowed
assert_raises(IndexError, lambda: a[:4])
assert_raises(IndexError, lambda: a[:-4])
assert_raises(IndexError, lambda: a[:3:-1])
assert_raises(IndexError, lambda: a[:-5:-1])
assert_raises(IndexError, lambda: a[4:])
assert_raises(IndexError, lambda: a[-4:])
assert_raises(IndexError, lambda: a[4::-1])
assert_raises(IndexError, lambda: a[-4::-1])
assert_raises(IndexError, lambda: a[...,:5])
assert_raises(IndexError, lambda: a[...,:-5])
assert_raises(IndexError, lambda: a[...,:5:-1])
assert_raises(IndexError, lambda: a[...,:-6:-1])
assert_raises(IndexError, lambda: a[...,5:])
assert_raises(IndexError, lambda: a[...,-5:])
assert_raises(IndexError, lambda: a[...,5::-1])
assert_raises(IndexError, lambda: a[...,-5::-1])
# Boolean indices cannot be part of a larger tuple index
assert_raises(IndexError, lambda: a[a[:,0]==1,0])
assert_raises(IndexError, lambda: a[a[:,0]==1,...])
assert_raises(IndexError, lambda: a[..., a[0]==1])
assert_raises(IndexError, lambda: a[[True, True, True]])
assert_raises(IndexError, lambda: a[(True, True, True),])
# Integer array indices are not allowed (except for 0-D)
idx = asarray([[0, 1]])
assert_raises(IndexError, lambda: a[idx])
assert_raises(IndexError, lambda: a[idx,])
assert_raises(IndexError, lambda: a[[0, 1]])
assert_raises(IndexError, lambda: a[(0, 1), (0, 1)])
assert_raises(IndexError, lambda: a[[0, 1]])
assert_raises(IndexError, lambda: a[np.array([[0, 1]])])
# Multiaxis indices must contain exactly as many indices as dimensions
assert_raises(IndexError, lambda: a[()])
assert_raises(IndexError, lambda: a[0,])
assert_raises(IndexError, lambda: a[0])
assert_raises(IndexError, lambda: a[:])
def test_operators():
# For every operator, we test that it works for the required type
# combinations and raises TypeError otherwise
binary_op_dtypes = {
"__add__": "numeric",
"__and__": "integer_or_boolean",
"__eq__": "all",
"__floordiv__": "numeric",
"__ge__": "numeric",
"__gt__": "numeric",
"__le__": "numeric",
"__lshift__": "integer",
"__lt__": "numeric",
"__mod__": "numeric",
"__mul__": "numeric",
"__ne__": "all",
"__or__": "integer_or_boolean",
"__pow__": "numeric",
"__rshift__": "integer",
"__sub__": "numeric",
"__truediv__": "floating",
"__xor__": "integer_or_boolean",
}
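    # For example (reading the table above): "__add__" is only defined for
    # numeric dtypes, so adding two bool arrays must raise TypeError while
    # int8 + int8 is allowed; "__eq__" is defined for all dtypes.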
# Recompute each time because of in-place ops
def _array_vals():
for d in _integer_dtypes:
yield asarray(1, dtype=d)
for d in _boolean_dtypes:
yield asarray(False, dtype=d)
for d in _floating_dtypes:
yield asarray(1.0, dtype=d)
for op, dtypes in binary_op_dtypes.items():
ops = [op]
if op not in ["__eq__", "__ne__", "__le__", "__ge__", "__lt__", "__gt__"]:
rop = "__r" + op[2:]
iop = "__i" + op[2:]
ops += [rop, iop]
for s in [1, 1.0, False]:
for _op in ops:
for a in _array_vals():
# Test array op scalar. From the spec, the following combinations
# are supported:
# - Python bool for a bool array dtype,
# - a Python int within the bounds of the given dtype for integer array dtypes,
# - a Python int or float for floating-point array dtypes
# We do not do bounds checking for int scalars, but rather use the default
# NumPy behavior for casting in that case.
if ((dtypes == "all"
or dtypes == "numeric" and a.dtype in _numeric_dtypes
or dtypes == "integer" and a.dtype in _integer_dtypes
or dtypes == "integer_or_boolean" and a.dtype in _integer_or_boolean_dtypes
or dtypes == "boolean" and a.dtype in _boolean_dtypes
or dtypes == "floating" and a.dtype in _floating_dtypes
)
# bool is a subtype of int, which is why we avoid
# isinstance here.
and (a.dtype in _boolean_dtypes and type(s) == bool
or a.dtype in _integer_dtypes and type(s) == int
or a.dtype in _floating_dtypes and type(s) in [float, int]
)):
# Only test for no error
getattr(a, _op)(s)
else:
assert_raises(TypeError, lambda: getattr(a, _op)(s))
# Test array op array.
for _op in ops:
for x in _array_vals():
for y in _array_vals():
# See the promotion table in NEP 47 or the array
# API spec page on type promotion. Mixed kind
# promotion is not defined.
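                    # For example, int8 op uint64 has no defined result
                    # dtype, and int32 op float64 mixes kinds, so both
                    # combinations must raise TypeError here.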
if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
or x.dtype in _boolean_dtypes and y.dtype not in _boolean_dtypes
or y.dtype in _boolean_dtypes and x.dtype not in _boolean_dtypes
or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
):
assert_raises(TypeError, lambda: getattr(x, _op)(y))
# Ensure in-place operators only promote to the same dtype as the left operand.
elif (
_op.startswith("__i")
and result_type(x.dtype, y.dtype) != x.dtype
):
assert_raises(TypeError, lambda: getattr(x, _op)(y))
# Ensure only those dtypes that are required for every operator are allowed.
elif (dtypes == "all" and (x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
or x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
or (dtypes == "numeric" and x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
or dtypes == "integer" and x.dtype in _integer_dtypes and y.dtype in _numeric_dtypes
or dtypes == "integer_or_boolean" and (x.dtype in _integer_dtypes and y.dtype in _integer_dtypes
or x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes)
or dtypes == "boolean" and x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
or dtypes == "floating" and x.dtype in _floating_dtypes and y.dtype in _floating_dtypes
):
getattr(x, _op)(y)
else:
assert_raises(TypeError, lambda: getattr(x, _op)(y))
unary_op_dtypes = {
"__abs__": "numeric",
"__invert__": "integer_or_boolean",
"__neg__": "numeric",
"__pos__": "numeric",
}
for op, dtypes in unary_op_dtypes.items():
for a in _array_vals():
if (
dtypes == "numeric"
and a.dtype in _numeric_dtypes
or dtypes == "integer_or_boolean"
and a.dtype in _integer_or_boolean_dtypes
):
# Only test for no error
getattr(a, op)()
else:
assert_raises(TypeError, lambda: getattr(a, op)())
# Finally, matmul() must be tested separately, because it works a bit
# different from the other operations.
def _matmul_array_vals():
for a in _array_vals():
yield a
for d in _all_dtypes:
yield ones((3, 4), dtype=d)
yield ones((4, 2), dtype=d)
yield ones((4, 4), dtype=d)
# Scalars always error
for _op in ["__matmul__", "__rmatmul__", "__imatmul__"]:
for s in [1, 1.0, False]:
for a in _matmul_array_vals():
if (type(s) in [float, int] and a.dtype in _floating_dtypes
or type(s) == int and a.dtype in _integer_dtypes):
# Type promotion is valid, but @ is not allowed on 0-D
# inputs, so the error is a ValueError
assert_raises(ValueError, lambda: getattr(a, _op)(s))
else:
assert_raises(TypeError, lambda: getattr(a, _op)(s))
for x in _matmul_array_vals():
for y in _matmul_array_vals():
if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
or x.dtype in _boolean_dtypes
or y.dtype in _boolean_dtypes
):
assert_raises(TypeError, lambda: x.__matmul__(y))
assert_raises(TypeError, lambda: y.__rmatmul__(x))
assert_raises(TypeError, lambda: x.__imatmul__(y))
elif x.shape == () or y.shape == () or x.shape[1] != y.shape[0]:
assert_raises(ValueError, lambda: x.__matmul__(y))
assert_raises(ValueError, lambda: y.__rmatmul__(x))
if result_type(x.dtype, y.dtype) != x.dtype:
assert_raises(TypeError, lambda: x.__imatmul__(y))
else:
assert_raises(ValueError, lambda: x.__imatmul__(y))
else:
x.__matmul__(y)
y.__rmatmul__(x)
if result_type(x.dtype, y.dtype) != x.dtype:
assert_raises(TypeError, lambda: x.__imatmul__(y))
elif y.shape[0] != y.shape[1]:
# This one fails because x @ y has a different shape from x
assert_raises(ValueError, lambda: x.__imatmul__(y))
else:
x.__imatmul__(y)
def test_python_scalar_constructors():
b = asarray(False)
i = asarray(0)
f = asarray(0.0)
assert bool(b) == False
assert int(i) == 0
assert float(f) == 0.0
assert operator.index(i) == 0
# bool/int/float should only be allowed on 0-D arrays.
assert_raises(TypeError, lambda: bool(asarray([False])))
assert_raises(TypeError, lambda: int(asarray([0])))
assert_raises(TypeError, lambda: float(asarray([0.0])))
assert_raises(TypeError, lambda: operator.index(asarray([0])))
# bool/int/float should only be allowed on arrays of the corresponding
# dtype
assert_raises(ValueError, lambda: bool(i))
assert_raises(ValueError, lambda: bool(f))
assert_raises(ValueError, lambda: int(b))
assert_raises(ValueError, lambda: int(f))
assert_raises(ValueError, lambda: float(b))
assert_raises(ValueError, lambda: float(i))
assert_raises(TypeError, lambda: operator.index(b))
assert_raises(TypeError, lambda: operator.index(f))
def test_device_property():
a = ones((3, 4))
assert a.device == 'cpu'
assert all(equal(a.to_device('cpu'), a))
assert_raises(ValueError, lambda: a.to_device('gpu'))
assert all(equal(asarray(a, device='cpu'), a))
assert_raises(ValueError, lambda: asarray(a, device='gpu'))
def test_array_properties():
a = ones((1, 2, 3))
b = ones((2, 3))
assert_raises(ValueError, lambda: a.T)
assert isinstance(b.T, Array)
assert b.T.shape == (3, 2)
assert isinstance(a.mT, Array)
assert a.mT.shape == (1, 3, 2)
assert isinstance(b.mT, Array)
assert b.mT.shape == (3, 2)
def test___array__():
a = ones((2, 3), dtype=int16)
assert np.asarray(a) is a._array
b = np.asarray(a, dtype=np.float64)
assert np.all(np.equal(b, np.ones((2, 3), dtype=np.float64)))
assert b.dtype == np.float64
def test_allow_newaxis():
a = ones(5)
indexed_a = a[None, :]
assert indexed_a.shape == (1, 5)
def test_disallow_flat_indexing_with_newaxis():
a = ones((3, 3, 3))
with pytest.raises(IndexError):
a[None, 0, 0]
def test_disallow_mask_with_newaxis():
a = ones((3, 3, 3))
with pytest.raises(IndexError):
a[None, asarray(True)]
@pytest.mark.parametrize("shape", [(), (5,), (3, 3, 3)])
@pytest.mark.parametrize("index", ["string", False, True])
def test_error_on_invalid_index(shape, index):
a = ones(shape)
with pytest.raises(IndexError):
a[index]
def test_mask_0d_array_without_errors():
a = ones(())
a[asarray(True)]
@pytest.mark.parametrize(
"i", [slice(5), slice(5, 0), asarray(True), asarray([0, 1])]
)
def test_error_on_invalid_index_with_ellipsis(i):
a = ones((3, 3, 3))
with pytest.raises(IndexError):
a[..., i]
with pytest.raises(IndexError):
a[i, ...]
def test_array_keys_use_private_array():
"""
Indexing operations convert array keys before indexing the internal array
Fails when array_api array keys are not converted into NumPy-proper arrays
in __getitem__(). This is achieved by passing array_api arrays with 0-sized
dimensions, which NumPy-proper treats erroneously - not sure why!
TODO: Find and use appropriate __setitem__() case.
"""
a = ones((0, 0), dtype=bool_)
assert a[a].shape == (0,)
a = ones((0,), dtype=bool_)
key = ones((0, 0), dtype=bool_)
with pytest.raises(IndexError):
a[key]
|
{
"content_hash": "ba604455bca8a2a724694602b0fa024e",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 132,
"avg_line_length": 42.056,
"alnum_prop": 0.5429585948893538,
"repo_name": "endolith/numpy",
"id": "f6efacefaee130f3bfeed144809a485c3b88a309",
"size": "15771",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "numpy/array_api/tests/test_array_object.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5987269"
},
{
"name": "C++",
"bytes": "212894"
},
{
"name": "Cython",
"bytes": "146262"
},
{
"name": "D",
"bytes": "19"
},
{
"name": "Dockerfile",
"bytes": "5266"
},
{
"name": "Forth",
"bytes": "3787"
},
{
"name": "Fortran",
"bytes": "24695"
},
{
"name": "Makefile",
"bytes": "1697"
},
{
"name": "Python",
"bytes": "10541563"
},
{
"name": "Shell",
"bytes": "25901"
},
{
"name": "Smarty",
"bytes": "4104"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
}
|
"""Provide variant calling with VarScan from TGI at Wash U.
http://varscan.sourceforge.net/
"""
import os
import sys
from bcbio import broad, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.pipeline import config_utils
from bcbio.provenance import do
from bcbio.variation import samtools, vcfutils
from bcbio.variation.vcfutils import (combine_variant_files, write_empty_vcf,
get_paired_bams, bgzip_and_index)
import pysam
def run_varscan(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
paired = get_paired_bams(align_bams, items)
if paired and paired.normal_bam and paired.tumor_bam:
call_file = samtools.shared_variantcall(_varscan_paired, "varscan",
align_bams, ref_file, items,
assoc_files, region, out_file)
else:
vcfutils.check_paired_problems(items)
call_file = samtools.shared_variantcall(_varscan_work, "varscan",
align_bams, ref_file,
items, assoc_files,
region, out_file)
return call_file
def _get_jvm_opts(config, tmp_dir):
"""Retrieve common options for running VarScan.
Handles jvm_opts, setting user and country to English to avoid issues
with different locales producing non-compliant VCF.
"""
resources = config_utils.get_resources("varscan", config)
jvm_opts = resources.get("jvm_opts", ["-Xmx750m", "-Xmx2g"])
jvm_opts = config_utils.adjust_opts(jvm_opts,
{"algorithm": {"memory_adjust":
{"magnitude": 1.1, "direction": "decrease"}}})
jvm_opts += ["-Duser.language=en", "-Duser.country=US"]
jvm_opts += broad.get_default_jvm_opts(tmp_dir)
return " ".join(jvm_opts)
def _varscan_options_from_config(config):
"""Retrieve additional options for VarScan from the configuration.
"""
opts = ["--min-coverage 5", "--p-value 0.98", "--strand-filter 1"]
resources = config_utils.get_resources("varscan", config)
if resources.get("options"):
opts += [str(x) for x in resources["options"]]
return opts
def spv_freq_filter(line, tumor_index):
"""Filter VarScan calls based on the SPV value and frequency.
    Flags (soft-filters) calls with SPV < 0.05 and a tumor FREQ > 0.35.
False positives dominate these higher frequency, low SPV calls. They appear
to be primarily non-somatic/germline variants not removed by other filters.
"""
if line.startswith("#CHROM"):
headers = [('##FILTER=<ID=SpvFreq,Description="High frequency (tumor FREQ > 0.35) '
'and low p-value for somatic (SPV < 0.05)">')]
return "\n".join(headers) + "\n" + line
elif line.startswith("#"):
return line
else:
parts = line.split("\t")
sample_ft = {a: v for (a, v) in zip(parts[8].split(":"), parts[9 + tumor_index].split(":"))}
freq = utils.safe_to_float(sample_ft.get("FREQ"))
spvs = [x for x in parts[7].split(";") if x.startswith("SPV=")]
spv = utils.safe_to_float(spvs[0].split("=")[-1] if spvs else None)
fname = None
if spv is not None and freq is not None:
if spv < 0.05 and freq > 0.35:
fname = "SpvFreq"
if fname:
if parts[6] in set([".", "PASS"]):
parts[6] = fname
else:
parts[6] += ";%s" % fname
line = "\t".join(parts)
return line
def _varscan_paired(align_bams, ref_file, items, target_regions, out_file):
"""Run a paired VarScan analysis, also known as "somatic". """
max_read_depth = "1000"
config = items[0]["config"]
paired = get_paired_bams(align_bams, items)
if not paired.normal_bam:
affected_batch = items[0]["metadata"]["batch"]
message = ("Batch {} requires both tumor and normal BAM files for"
" VarScan cancer calling").format(affected_batch)
raise ValueError(message)
if not utils.file_exists(out_file):
assert out_file.endswith(".vcf.gz"), "Expect bgzipped output to VarScan"
normal_mpileup_cl = samtools.prep_mpileup([paired.normal_bam], ref_file,
config, max_read_depth,
target_regions=target_regions,
want_bcf=False)
tumor_mpileup_cl = samtools.prep_mpileup([paired.tumor_bam], ref_file,
config, max_read_depth,
target_regions=target_regions,
want_bcf=False)
base, ext = utils.splitext_plus(out_file)
indel_file = base + "-indel.vcf"
snp_file = base + "-snp.vcf"
with file_transaction(config, indel_file, snp_file) as (tx_indel, tx_snp):
with tx_tmpdir(items[0]) as tmp_dir:
jvm_opts = _get_jvm_opts(config, tmp_dir)
opts = " ".join(_varscan_options_from_config(config))
remove_zerocoverage = r"{ ifne grep -v -P '\t0\t\t$' || true; }"
export = utils.local_path_export()
varscan_cmd = ("{export} varscan {jvm_opts} somatic "
"<({normal_mpileup_cl} | {remove_zerocoverage}) "
"<({tumor_mpileup_cl} | {remove_zerocoverage}) "
"--output-snp {tx_snp} --output-indel {tx_indel} "
"--output-vcf {opts} ")
# add minimum AF
min_af = float(utils.get_in(paired.tumor_config, ("algorithm",
"min_allele_fraction"), 10)) / 100.0
varscan_cmd += "--min-var-freq {min_af} "
do.run(varscan_cmd.format(**locals()), "Varscan", None, None)
to_combine = []
for fname in [snp_file, indel_file]:
if utils.file_exists(fname):
fix_file = "%s-fix.vcf.gz" % (utils.splitext_plus(fname)[0])
with file_transaction(config, fix_file) as tx_fix_file:
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
py_cl = os.path.join(os.path.dirname(sys.executable), "py")
normal_name = paired.normal_name
tumor_name = paired.tumor_name
cmd = ("cat {fname} | "
"{py_cl} -x 'bcbio.variation.varscan.fix_varscan_output(x,"
""" "{normal_name}", "{tumor_name}")' | """
"{fix_ambig_ref} | {fix_ambig_alt} | ifne vcfuniqalleles | "
"""{py_cl} -x 'bcbio.variation.vcfutils.add_contig_to_header(x, "{ref_file}")' | """
"""bcftools filter -m + -s REJECT -e "SS != '.' && SS != '2'" 2> /dev/null | """
"{py_cl} -x 'bcbio.variation.varscan.spv_freq_filter(x, 1)' | "
"bgzip -c > {tx_fix_file}")
do.run(cmd.format(**locals()), "Varscan paired fix")
to_combine.append(fix_file)
if not to_combine:
out_file = write_empty_vcf(out_file, config)
else:
out_file = combine_variant_files(to_combine,
out_file, ref_file, config,
region=target_regions)
if os.path.getsize(out_file) == 0:
write_empty_vcf(out_file)
if out_file.endswith(".gz"):
out_file = bgzip_and_index(out_file, config)
def fix_varscan_output(line, normal_name="", tumor_name=""):
"""Fix a varscan VCF line.
Fixes the ALT column and also fixes floating point values
    output as strings to be Floats: FREQ, SSC.
This function was contributed by Sean Davis <sdavis2@mail.nih.gov>,
with minor modifications by Luca Beltrame <luca.beltrame@marionegri.it>.
"""
line = line.strip()
tofix = ("##INFO=<ID=SSC", "##FORMAT=<ID=FREQ")
if(line.startswith("##")):
if line.startswith(tofix):
line = line.replace('Number=1,Type=String',
'Number=1,Type=Float')
return line
line = line.split("\t")
if line[0].startswith("#CHROM"):
if tumor_name and normal_name:
mapping = {"NORMAL": normal_name, "TUMOR": tumor_name}
base_header = line[:9]
old_samples = line[9:]
if len(old_samples) == 0:
return "\t".join(line)
samples = [mapping[sample_name] for sample_name in old_samples]
assert len(old_samples) == len(samples)
return "\t".join(base_header + samples)
else:
return "\t".join(line)
try:
REF, ALT = line[3:5]
except ValueError:
return "\t".join(line)
def _normalize_freq(line, sample_i):
"""Ensure FREQ genotype value is float as defined in header.
"""
ft_parts = line[8].split(":")
dat = line[sample_i].split(":")
# Non-conforming no-call sample, don't try to fix FREQ
if len(dat) != len(ft_parts):
return line
freq_i = ft_parts.index("FREQ")
try:
dat[freq_i] = str(float(dat[freq_i].rstrip("%")) / 100)
except ValueError: # illegal binary characters -- set frequency to zero
dat[freq_i] = "0.0"
line[sample_i] = ":".join(dat)
return line
if len(line) > 9:
line = _normalize_freq(line, 9)
if len(line) > 10:
line = _normalize_freq(line, 10)
# HACK: The position of the SS= changes, so we just search for it
ss_vals = [item for item in line[7].split(";") if item.startswith("SS=")]
if len(ss_vals) > 0:
somatic_status = int(ss_vals[0].split("=")[1]) # Get the number
else:
somatic_status = None
if somatic_status == 5:
# "Unknown" states are broken in current versions of VarScan
# so we just bail out here for now
return
# fix FREQ for any additional samples -- multi-sample VarScan calling
if len(line) > 11:
for i in range(11, len(line)):
line = _normalize_freq(line, i)
#FIXME: VarScan also produces invalid REF records (e.g. CAA/A)
# This is not handled yet.
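    # Worked examples of the ALT rewrite below (illustrative records):
    #   REF=C, ALT=+A    -> REF=C,  ALT=CA     (insertion)
    #   REF=C, ALT=-A    -> REF=CA, ALT=C      (deletion)
    #   REF=C, ALT=-A/+T -> REF=CA, ALT=C,CAT  (mixed multi-allelic)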
if "+" in ALT or "-" in ALT:
if "/" not in ALT:
if ALT[0] == "+":
R = REF
A = REF + ALT[1:]
elif ALT[0] == "-":
R = REF + ALT[1:]
A = REF
else:
Ins = [p[1:] for p in ALT.split("/") if p[0] == "+"]
Del = [p[1:] for p in ALT.split("/") if p[0] == "-"]
if len(Del):
REF += sorted(Del, key=lambda x: len(x))[-1]
A = ",".join([REF[::-1].replace(p[::-1], "", 1)[::-1]
for p in Del] + [REF + p for p in Ins])
R = REF
REF = R
ALT = A
else:
ALT = ALT.replace('/', ',')
line[3] = REF
line[4] = ALT
return "\t".join(line)
def _create_sample_list(in_bams, vcf_file):
"""Pull sample names from input BAMs and create input sample list.
"""
out_file = "%s-sample_list.txt" % os.path.splitext(vcf_file)[0]
with open(out_file, "w") as out_handle:
for in_bam in in_bams:
with pysam.Samfile(in_bam, "rb") as work_bam:
for rg in work_bam.header.get("RG", []):
out_handle.write("%s\n" % rg["SM"])
return out_file
def _varscan_work(align_bams, ref_file, items, target_regions, out_file):
"""Perform SNP and indel genotyping with VarScan.
"""
config = items[0]["config"]
orig_out_file = out_file
out_file = orig_out_file.replace(".vcf.gz", ".vcf")
max_read_depth = "1000"
sample_list = _create_sample_list(align_bams, out_file)
mpileup = samtools.prep_mpileup(align_bams, ref_file, config, max_read_depth,
target_regions=target_regions, want_bcf=False)
# VarScan fails to generate a header on files that start with
# zerocoverage calls; strip these with grep, we're not going to
# call on them
remove_zerocoverage = r"{ ifne grep -v -P '\t0\t\t$' || true; }"
    # We use ifne from moreutils to ensure we only process files with input,
    # skipping otherwise
# http://manpages.ubuntu.com/manpages/natty/man1/ifne.1.html
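    # A zero-coverage mpileup line looks roughly like "chr1\t100\tN\t0\t\t"
    # (depth 0 with empty base/quality columns), which is what the
    # '\t0\t\t$' pattern above matches.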
with tx_tmpdir(items[0]) as tmp_dir:
jvm_opts = _get_jvm_opts(config, tmp_dir)
opts = " ".join(_varscan_options_from_config(config))
min_af = float(utils.get_in(config, ("algorithm", "min_allele_fraction"), 10)) / 100.0
fix_ambig_ref = vcfutils.fix_ambiguous_cl()
fix_ambig_alt = vcfutils.fix_ambiguous_cl(5)
py_cl = os.path.join(os.path.dirname(sys.executable), "py")
export = utils.local_path_export()
cmd = ("{export} {mpileup} | {remove_zerocoverage} | "
"ifne varscan {jvm_opts} mpileup2cns {opts} "
"--vcf-sample-list {sample_list} --min-var-freq {min_af} --output-vcf --variants | "
"""{py_cl} -x 'bcbio.variation.vcfutils.add_contig_to_header(x, "{ref_file}")' | """
"{py_cl} -x 'bcbio.variation.varscan.fix_varscan_output(x)' | "
"{fix_ambig_ref} | {fix_ambig_alt} | ifne vcfuniqalleles > {out_file}")
do.run(cmd.format(**locals()), "Varscan", None,
[do.file_exists(out_file)])
os.remove(sample_list)
# VarScan can create completely empty files in regions without
# variants, so we create a correctly formatted empty file
if os.path.getsize(out_file) == 0:
write_empty_vcf(out_file)
if orig_out_file.endswith(".gz"):
vcfutils.bgzip_and_index(out_file, config)
|
{
"content_hash": "a72515a79d014c921f7429c02c2521c4",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 111,
"avg_line_length": 43.823170731707314,
"alnum_prop": 0.531932656184778,
"repo_name": "biocyberman/bcbio-nextgen",
"id": "b3e2510b3bb91ebb47e0f00bc060648f47b2b045",
"size": "14374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bcbio/variation/varscan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "3399"
},
{
"name": "Python",
"bytes": "2191067"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "14131"
}
],
"symlink_target": ""
}
|
"""This module contains classes that represent Telegram errors.
.. versionchanged:: 20.0
Replaced ``Unauthorized`` by :class:`Forbidden`.
"""
__all__ = (
"BadRequest",
"ChatMigrated",
"Conflict",
"Forbidden",
"InvalidToken",
"NetworkError",
"PassportDecryptionError",
"RetryAfter",
"TelegramError",
"TimedOut",
)
from typing import Optional, Tuple, Union
def _lstrip_str(in_s: str, lstr: str) -> str:
"""
Args:
in_s (:obj:`str`): in string
lstr (:obj:`str`): substr to strip from left side
Returns:
:obj:`str`: The stripped string.
"""
if in_s.startswith(lstr):
res = in_s[len(lstr) :]
else:
res = in_s
return res
class TelegramError(Exception):
"""
Base class for Telegram errors.
.. seealso:: `Exceptions, Warnings and Logging <https://github.com/\
python-telegram-bot/python-telegram-bot/wiki/Exceptions%2C-Warnings-and-Logging>`_
"""
__slots__ = ("message",)
def __init__(self, message: str):
super().__init__()
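        # strip common prefixes added by the Bot API so only the bare description is kept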
msg = _lstrip_str(message, "Error: ")
msg = _lstrip_str(msg, "[Error]: ")
msg = _lstrip_str(msg, "Bad Request: ")
if msg != message:
# api_error - capitalize the msg...
msg = msg.capitalize()
self.message = msg
def __str__(self) -> str:
return self.message
def __repr__(self) -> str:
return f"{self.__class__.__name__}('{self.message}')"
def __reduce__(self) -> Tuple[type, Tuple[str]]:
return self.__class__, (self.message,)
class Forbidden(TelegramError):
"""Raised when the bot has not enough rights to perform the requested action.
Examples:
:any:`Raw API Bot <examples.rawapibot>`
.. versionchanged:: 20.0
This class was previously named ``Unauthorized``.
"""
__slots__ = ()
class InvalidToken(TelegramError):
"""Raised when the token is invalid.
Args:
message (:obj:`str`, optional): Any additional information about the exception.
.. versionadded:: 20.0
"""
__slots__ = ()
def __init__(self, message: str = None) -> None:
super().__init__("Invalid token" if message is None else message)
class NetworkError(TelegramError):
"""Base class for exceptions due to networking errors.
Examples:
:any:`Raw API Bot <examples.rawapibot>`
"""
__slots__ = ()
class BadRequest(NetworkError):
"""Raised when Telegram could not process the request correctly."""
__slots__ = ()
class TimedOut(NetworkError):
"""Raised when a request took too long to finish.
Args:
message (:obj:`str`, optional): Any additional information about the exception.
.. versionadded:: 20.0
"""
__slots__ = ()
def __init__(self, message: str = None) -> None:
super().__init__(message or "Timed out")
class ChatMigrated(TelegramError):
"""
Raised when the requested group chat migrated to supergroup and has a new chat id.
.. seealso:: `Storing Bot, User and Chat Related Data <https://github.com/\
python-telegram-bot/python-telegram-bot/wiki/Storing-bot%2C-user-and-\
chat-related-data>`_,
Args:
new_chat_id (:obj:`int`): The new chat id of the group.
Attributes:
new_chat_id (:obj:`int`): The new chat id of the group.
"""
__slots__ = ("new_chat_id",)
def __init__(self, new_chat_id: int):
super().__init__(f"Group migrated to supergroup. New chat id: {new_chat_id}")
self.new_chat_id = new_chat_id
def __reduce__(self) -> Tuple[type, Tuple[int]]: # type: ignore[override]
return self.__class__, (self.new_chat_id,)
class RetryAfter(TelegramError):
"""
    Raised when flood limits were exceeded.
.. versionchanged:: 20.0
:attr:`retry_after` is now an integer to comply with the Bot API.
Args:
retry_after (:obj:`int`): Time in seconds, after which the bot can retry the request.
Attributes:
retry_after (:obj:`int`): Time in seconds, after which the bot can retry the request.
"""
__slots__ = ("retry_after",)
def __init__(self, retry_after: int):
super().__init__(f"Flood control exceeded. Retry in {retry_after} seconds")
self.retry_after = retry_after
def __reduce__(self) -> Tuple[type, Tuple[float]]: # type: ignore[override]
return self.__class__, (self.retry_after,)
class Conflict(TelegramError):
"""Raised when a long poll or webhook conflicts with another one."""
__slots__ = ()
def __reduce__(self) -> Tuple[type, Tuple[str]]:
return self.__class__, (self.message,)
class PassportDecryptionError(TelegramError):
"""Something went wrong with decryption.
.. versionchanged:: 20.0
This class was previously named ``TelegramDecryptionError`` and was available via
``telegram.TelegramDecryptionError``.
"""
__slots__ = ("_msg",)
def __init__(self, message: Union[str, Exception]):
super().__init__(f"PassportDecryptionError: {message}")
self._msg = str(message)
def __reduce__(self) -> Tuple[type, Tuple[str]]:
return self.__class__, (self._msg,)
|
{
"content_hash": "f52c68a78e5727f7936d5e9bbbaabd0e",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 93,
"avg_line_length": 25.692682926829267,
"alnum_prop": 0.596544522498576,
"repo_name": "tzpBingo/github-trending",
"id": "7e92bfaa90a09b54dbf1f03ccd1a0b629429743d",
"size": "6076",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "codespace/python/telegram/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
}
|
import json
import re
import csv
import time
import sys
import argparse
class CFNDevice(object):
def __init__(self, atlaskey, name, devicetype, facility, street, city, state, country, accountid, receivedat, eventlogid, message, osversion,
osservicepack, hotfixname, KB, hotfixstatus ,installdate, result):
self.atlaskey = atlaskey
self.name = name
self.devicetype = devicetype
self.facility = facility
self.street = street #4
self.city = city
self.state = state
self.country = country
self.accountid = accountid
self.receivedat = receivedat
self.eventlogid = eventlogid
self.message = message
self.osversion = osversion
self.osservicepack = osservicepack
self.hotfixname = hotfixname #14
self.KB = KB
self.hotfixstatus = hotfixstatus #16
self.installdate = installdate
self.result = result
def ProcessArgs():
parser = argparse.ArgumentParser(description='Process file. If no input arg, will process query_result.csv. If no output arg, result will be in result.csv')
parser.add_argument('-i','--input', help='Input file name',required=False)
parser.add_argument('-o','--output',help='Output file name', required=False)
args = parser.parse_args()
return args.input, args.output
def CompareKeys(str1, str2):
if str1 == str2:
return 0
elif str1 < str2:
return -1
else:
return 1
def binarySearch(aList, item):
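    # Binary search over aList (assumed sorted by atlaskey); returns the matching index or -1 if not found.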
    first = 0
last = len(aList) - 1
found = False
i = -1
while first <= last and not found:
mid = (first+last)//2
temp = CompareKeys(item, aList[mid].atlaskey)
if temp == 0:
i = mid
found = True
return i
elif temp == -1:
last = mid - 1
else:
first = mid + 1
return i
def insertItem(aList, item):
    # insert item into aList keeping it sorted by atlaskey (one insertion-sort pass)
    aList.append(item)
    i = len(aList) - 1
    while i > 0 and item.atlaskey < aList[i-1].atlaskey:
        aList[i] = aList[i-1]
        i = i - 1
    aList[i] = item
def useList():
ifile=''
ofile=''
    ifile, ofile = ProcessArgs()
if ifile is None:
ifile='query_result.csv'
if ofile is None:
ofile = 'result.csv'
#used to time how quick this thing runs. Not very accurate, but good enough
start = time.time()
    #using a list kept sorted by atlaskey so binarySearch() can be used
    Devices = []
    #a dictionary (hash table) should be faster than even the binary search implementation; see useDict()
count = 0
with open(ifile) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
next(readCSV, None) # skip the headers
for row in readCSV:
count = count + 1
temp_index = binarySearch(Devices, row[0])
if temp_index == -1:
insertItem(Devices, CFNDevice(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11], row[12], row[13], row[14], row[15], row[16], row[17], True if row[18] == 'Safe' else False))
#print("Added device " + row[0] + " " + row[1])
else:
if row[0] == Devices[temp_index].atlaskey:
if row[16]=='Succeeded' and row[18] == 'Safe':
Devices[temp_index].receivedat = row[9]
Devices[temp_index].eventlogid = row[10]
Devices[temp_index].message = row[11]
Devices[temp_index].hotfixname = row[14]
Devices[temp_index].KB = row[15]
Devices[temp_index].hotfixstatus = row[16]
Devices[temp_index].installdate = row[17]
Devices[temp_index].result = True
#print ("Updated device: " + row[0] + " " + row[1])
output = open(ofile, 'w')
output.write('atlaskey, name, devicetype, facility, street, city, state, country, accountid, receivedat, eventlogid, message, osversion, osservicepack, hotfixname, KB, hotfixstatus, installdate, result\n')
for dev in Devices:
output.write('"' + dev.atlaskey.replace('"','""') + '"' + ',')
output.write('"' + dev.name.replace('"','""') + '"' + ',')
output.write('"' + dev.devicetype.replace('"','""') + '"' + ',')
output.write('"' + dev.facility.replace('"','""') + '"' + ',')
output.write('"' + dev.street.replace('"','""') + '"' + ',')
output.write('"' + dev.city.replace('"','""') + '"' + ',')
output.write('"' + dev.state.replace('"','""') + '"' + ',')
output.write('"' + dev.country.replace('"','""') + '"' + ',')
output.write('"' + dev.accountid.replace('"','""') + '"' + ',')
output.write('"' + dev.receivedat.replace('"','""') + '"' + ',')
output.write('"' + dev.eventlogid.replace('"','""') + '"' + ',')
output.write('"' + dev.message.replace('"','""') + '"' + ',')
output.write('"' + dev.osversion.replace('"','""') + '"' + ',')
output.write('"' + dev.osservicepack.replace('"','""') + '"' + ',')
output.write('"' + dev.hotfixname.replace('"','""') + '"' + ',')
output.write('"' + dev.KB.replace('"','""') + '"' + ',')
output.write('"' + dev.hotfixstatus.replace('"','""') + '"' + ',')
output.write('"' + dev.installdate.replace('"','""') + '"' + ',')
output.write(str(dev.result) + ',')
output.write('\n')
output.close()
end = time.time()
print(end - start)
print("Total Devices: " + str(len(Devices)))
def useDict():
ifile=''
ofile=''
    ifile, ofile = ProcessArgs()
if ifile is None:
ifile='query_result.csv'
if ofile is None:
ofile = 'result.csv'
#used to time how quick this thing runs. Not very accurate, but good enough
start = time.time()
    #using a dictionary, which is a hash table. This should be faster than even the binary search implementation.
    Devices = {}
count = 0
with open(ifile) as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
next(readCSV, None) # skip the headers
for row in readCSV:
count = count + 1
temp = Devices.get(row[0])
if temp is None:
Devices[row[0]] = CFNDevice(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11], row[12], row[13], row[14], row[15], row[16], row[17], True if row[18] == 'Safe' else False)
#print("Added device " + row[0] + " " + row[1])
else:
if row[16]=='Succeeded' and row[18] == 'Safe':
Devices[row[0]] = CFNDevice(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11], row[12], row[13], row[14], row[15], row[16], row[17], True)
#print ("Updated device: " + row[0] + " " + row[1])
output = open(ofile, 'w')
output.write('atlaskey, name, devicetype, facility, street, city, state, country, accountid, receivedat, eventlogid, message, osversion, osservicepack, hotfixname, KB, hotfixstatus, installdate, result\n')
for k, dev in Devices.items():
output.write('"' + dev.atlaskey.replace('"','""') + '"' + ',')
output.write('"' + dev.name.replace('"','""') + '"' + ',')
output.write('"' + dev.devicetype.replace('"','""') + '"' + ',')
output.write('"' + dev.facility.replace('"','""') + '"' + ',')
output.write('"' + dev.street.replace('"','""') + '"' + ',')
output.write('"' + dev.city.replace('"','""') + '"' + ',')
output.write('"' + dev.state.replace('"','""') + '"' + ',')
output.write('"' + dev.country.replace('"','""') + '"' + ',')
output.write('"' + dev.accountid.replace('"','""') + '"' + ',')
output.write('"' + dev.receivedat.replace('"','""') + '"' + ',')
output.write('"' + dev.eventlogid.replace('"','""') + '"' + ',')
output.write('"' + dev.message.replace('"','""') + '"' + ',')
output.write('"' + dev.osversion.replace('"','""') + '"' + ',')
output.write('"' + dev.osservicepack.replace('"','""') + '"' + ',')
output.write('"' + dev.hotfixname.replace('"','""') + '"' + ',')
output.write('"' + dev.KB.replace('"','""') + '"' + ',')
output.write('"' + dev.hotfixstatus.replace('"','""') + '"' + ',')
output.write('"' + dev.installdate.replace('"','""') + '"' + ',')
output.write(str(dev.result) + ',')
output.write('\n')
output.close()
end = time.time()
print(end - start)
print("Rows in file: " + str(count))
print("Total Devices: " + str(len(Devices)))
def main():
# Turns out dict performs sooo much faster because it is implemented as a hashtable.....
# Runtimes:
# C# list: 30s
# Python list with binary search: 75s
# Python dict: 2.45s !!!
#useList()
useDict()
if __name__ == "__main__":
main()
|
{
"content_hash": "ca10a0587e8a5af54cc5aabee11d74cb",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 236,
"avg_line_length": 39.70940170940171,
"alnum_prop": 0.5130219543693499,
"repo_name": "abuenavista/PythonStuff",
"id": "126b51b4cc646657cfc08f94461a7e75833a51d2",
"size": "9292",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Dict - eg hash table/readLines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "9502"
},
{
"name": "Python",
"bytes": "40874"
}
],
"symlink_target": ""
}
|
import pandas as pd
import numpy as np
import scipy.stats as scs
from skimage.io import imread
import os
__author__ = 'mahieke'
def align(data):
"""
Return the data aligned with its mean values.
Args:
data (pandas.core.frame.DataFrame):
Dataset which should be aligned
Returns:
pandas.core.frame.DataFrame: New DataFrame with aligned data.
"""
mean = data.mean()
return data.sub(mean)
def pca(data, l=None):
"""
principal_component_analysis
Get base vectors q_i, projection of x_i on q_i and the standard
    deviation of all q_i. User can limit the dimension with l.
Args:
data (pandas.core.frame.DataFrame):
Dataset which should be aligned.
        l (int): Maximum number of output variables (principal components)
Returns:
Qi, Ai, Sigma (3-tuple): Qi are the basis vectors of the
principal components. Ai are the new principal
components. Sigma is the standard deviation of the
principal components.
"""
d, n = data.shape
if l:
l = min(l,n)
else:
l = n
aligned_data = align(data)
# singular value decomposition
U, d, V = np.linalg.svd(aligned_data, full_matrices=False)
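    # note: numpy returns V already transposed, so its rows are the principal directions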
# build diagonal matrix
D = np.diag(d)
    # basis vectors: the first l principal directions (rows of V)
    Qi = V[:l, :]
    # projection of the data onto the first l principal components
    Ai = U.dot(D)[:, :l]
    # standard deviation of the first l principal components
    Sigma = d[:l]
return Qi, Ai, Sigma
def pca_correlation(data, pca_data, l=None):
"""
Creates a DataFrame with the correlation between the
pca_data and the original data frame. Principal
components can be limited by l.
Args:
data (pandas.core.frame.DataFrame):
            Original data which should be correlated with pca_data.
pca_data (pandas.core.frame.DataFrame):
Principal component data which will be correlated with data.
        l (int): Maximum number of output variables (principal components)
Returns (pandas.core.frame.DataFrame):
Correlation matrix of pca_data and data
"""
d,n = data.shape
if l:
l = min(l,n)
else:
l = n
# corrolate each dataset of pca_data with dataset data
corr = [[scs.pearsonr(data[lbl], a)[0] for lbl in data] for a in pca_data.transpose()[:l,:]]
return pd.DataFrame(corr, columns=data.columns, index=["a{}".format(s) for s in range(0,l)])
def get_person_images(path, ext, min):
"""
Returns all directories which have a min amount of files of type ext.
Args:
        path (string): entry point where the directory walk starts
ext (string): extension of the files
min (int): minimal amount of files in directory
Returns (list):
A list with tuples containing the root path an the containing files
of the matching directories.
"""
import re
# for all leaves in directory tree
for root, dirs, files in os.walk(path):
if not dirs:
filtered_files = [x for x in files if re.search('{}$'.format(ext), x)]
if len(filtered_files) >= min:
yield (root, files)
def imstack2vectors(image):
"""
Args:
image:
Returns:
"""
s = image.shape
if len(s) == 3:
return np.asarray([image[:,:,index].flatten() for index in range(s[2])]).T
else:
return image.flatten()
def get_dataset(root, files, scale_factor=1):
"""
Args:
root (string): path to images
files (list): list of image files in directory root
scale_factor (int): scale image by this factor
Returns (dict):
Returns _data_ in a numpy array and metadata (_name_ and _amount_ of data)
keys: 'amount', 'name', 'data'
"""
name = root.split('/')[-1]
amount_img = len(files)
frame = []
for f in files:
img = imread('{}/{}'.format(root,f), as_grey=True)
        # coerce to int so the slicing below also works when a float scale factor is passed
scale = int(scale_factor)
if scale > 1:
img = img[::scale,::scale]
img = imstack2vectors(img)
frame.append(img)
nparray = np.array(frame)
return name, nparray, amount_img
|
{
"content_hash": "66051dc9f1e355a62424c4aa61828b11",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 97,
"avg_line_length": 25.539877300613497,
"alnum_prop": 0.5935623348546721,
"repo_name": "mahieke/maschinelles_lernen",
"id": "93f601ba4b13e3db3d58f10ef8858f3c5beb6e11",
"size": "4163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "a2/util/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2590263"
},
{
"name": "Python",
"bytes": "21689"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
_RAW_SALES_TABLE = 'raw_sales'
_AGGREGATED_SALES_TABLE = 'aggregated_sales'
|
{
"content_hash": "6a015369eef5bdb9513c927a878bb525",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 44,
"avg_line_length": 23.6,
"alnum_prop": 0.7372881355932204,
"repo_name": "alphagov/performanceplatform-collector",
"id": "ec52877cdcb75f5fbe09cb07461b89a1ac254de9",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "performanceplatform/collector/gcloud/table_names.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "17467"
},
{
"name": "Python",
"bytes": "277176"
},
{
"name": "Shell",
"bytes": "6286"
}
],
"symlink_target": ""
}
|
import pytest
import meshio
from . import helpers
@pytest.mark.parametrize(
"mesh",
[
helpers.empty_mesh,
helpers.tri_mesh,
helpers.quad_mesh,
helpers.tri_quad_mesh,
helpers.tet_mesh,
helpers.hex_mesh,
helpers.add_cell_data(
helpers.tri_mesh,
[("avsucd:material", (), int), ("a", (), float), ("b", (3,), float)],
),
helpers.add_point_data(helpers.add_point_data(helpers.tri_mesh, 1), 3),
],
)
def test(mesh, tmp_path):
helpers.write_read(tmp_path, meshio.avsucd.write, meshio.avsucd.read, mesh, 1.0e-13)
|
{
"content_hash": "106e6320d683efb9787e2fea3c485a05",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 88,
"avg_line_length": 24.8,
"alnum_prop": 0.5741935483870968,
"repo_name": "nschloe/meshio",
"id": "679cfaa78b965fd53a09aa551b02016c6b524d98",
"size": "620",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_avsucd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "124"
},
{
"name": "Python",
"bytes": "639630"
}
],
"symlink_target": ""
}
|
import webapp2
class MainHandler(webapp2.RequestHandler):
def get(self):
self.response.write('Hello world!')
app = webapp2.WSGIApplication([
('/', MainHandler)
], debug=True)
|
{
"content_hash": "ecbd5affa69f47732bf1efdccb2ff683",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 43,
"avg_line_length": 21.444444444444443,
"alnum_prop": 0.6839378238341969,
"repo_name": "luisibanez/cssi-appengine-introduction-01",
"id": "df1ecf61c9a9f2eb260da9c2a00052e55227dfa1",
"size": "193",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/example01/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "103094"
},
{
"name": "Python",
"bytes": "2431"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: azure_rm_manageddisk_facts
version_added: "2.4"
short_description: Get managed disk facts
description:
- Get facts for a specific managed disk or all managed disks.
notes:
- This module was called C(azure_rm_managed_disk_facts) before Ansible 2.8. The usage did not change.
options:
name:
description:
- Limit results to a specific managed disk.
type: str
resource_group:
description:
- Limit results to a specific resource group.
type: str
tags:
description:
- Limit results by providing a list of tags.
- Format tags as 'key' or 'key:value'.
type: list
extends_documentation_fragment:
- azure
- azure_tags
author:
- Bruno Medina (@brusMX)
'''
EXAMPLES = r'''
- name: Get facts for one managed disk
azure_rm_manageddisk_facts:
name: Testing
resource_group: myResourceGroup
- name: Get facts for all managed disks
azure_rm_manageddisk_facts:
- name: Get facts by tags
azure_rm_manageddisk_facts:
tags:
- testing
'''
RETURN = r'''
azure_managed_disk:
description: List of managed disk dicts.
returned: always
type: list
contains:
id:
description:
- Resource id.
type: str
name:
description:
- Name of the managed disk.
type: str
location:
description:
- Valid Azure location.
type: str
storage_account_type:
description:
- Type of storage for the managed disk.
- See https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-types for more information about this type.
type: str
sample: Standard_LRS
create_option:
description:
- Create option of the disk
type: str
sample: copy
source_uri:
description:
- URI to a valid VHD file to be used or the resource ID of the managed disk to copy.
type: str
os_type:
description:
- "Type of Operating System: C(linux) or C(windows)."
type: str
disk_size_gb:
description:
- Size in GB of the managed disk to be created.
type: str
managed_by:
description:
- Name of an existing virtual machine with which the disk is or will be associated, this VM should be in the same resource group.
type: str
tags:
description:
- Tags to assign to the managed disk.
type: dict
sample: { "tag": "value" }
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# handled in azure_rm_common
pass
# duplicated in azure_rm_manageddisk
def managed_disk_to_dict(managed_disk):
create_data = managed_disk.creation_data
return dict(
id=managed_disk.id,
name=managed_disk.name,
location=managed_disk.location,
tags=managed_disk.tags,
create_option=create_data.create_option.lower(),
source_uri=create_data.source_uri or create_data.source_resource_id,
disk_size_gb=managed_disk.disk_size_gb,
os_type=managed_disk.os_type.lower() if managed_disk.os_type else None,
storage_account_type=managed_disk.sku.name if managed_disk.sku else None,
managed_by=managed_disk.managed_by,
zone=managed_disk.zones[0] if managed_disk.zones and len(managed_disk.zones) > 0 else ''
)
class AzureRMManagedDiskFacts(AzureRMModuleBase):
"""Utility class to get managed disk facts"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str'
),
name=dict(
type='str'
),
            tags=dict(
                type='list'
            ),
)
self.results = dict(
ansible_facts=dict(
azure_managed_disk=[]
)
)
self.resource_group = None
self.name = None
self.create_option = None
self.source_uri = None
self.source_resource_uri = None
self.tags = None
super(AzureRMManagedDiskFacts, self).__init__(
derived_arg_spec=self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
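        # dispatch on the supplied filters: one disk by name, all disks in a resource group, or every disk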
self.results['ansible_facts']['azure_managed_disk'] = (
self.get_item() if self.name
else (self.list_items_by_resource_group() if self.resource_group else self.list_items())
)
return self.results
def get_item(self):
"""Get a single managed disk"""
item = None
result = []
try:
item = self.compute_client.disks.get(
self.resource_group,
self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
result = [managed_disk_to_dict(item)]
return result
def list_items(self):
"""Get all managed disks"""
try:
response = self.compute_client.disks.list()
except CloudError as exc:
self.fail('Failed to list all items - {0}'.format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(managed_disk_to_dict(item))
return results
def list_items_by_resource_group(self):
"""Get managed disks in a resource group"""
try:
response = self.compute_client.disks.list_by_resource_group(resource_group_name=self.resource_group)
except CloudError as exc:
self.fail('Failed to list items by resource group - {0}'.format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(managed_disk_to_dict(item))
return results
def main():
"""Main module execution code path"""
AzureRMManagedDiskFacts()
if __name__ == '__main__':
main()
|
{
"content_hash": "17711a98b37171d4775debfb9532d1b1",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 145,
"avg_line_length": 28.857758620689655,
"alnum_prop": 0.5717699775952203,
"repo_name": "SergeyCherepanov/ansible",
"id": "3c329a705821f5c2bec69c8250d3218d6b54a67d",
"size": "6912",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/cloud/azure/_azure_rm_managed_disk_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.base_user import BaseUserManager
from django.utils import timezone
class AccountManager(BaseUserManager):
def _create_user(self, email, password, is_staff, is_superuser, **extra_fields):
"""
Creates and saves a User with the given email and password.
"""
now = timezone.now()
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(
email=email, is_staff=is_staff, is_active=True, is_superuser=is_superuser,
last_login=now, date_joined=now, **extra_fields
)
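        # set_password() stores a salted hash rather than the raw password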
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
return self._create_user(email, password, False, False, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
return self._create_user(email, password, True, True, **extra_fields)
|
{
"content_hash": "7a9c9b7c513f1c2570e0459de2d1dbf5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 86,
"avg_line_length": 38.148148148148145,
"alnum_prop": 0.6456310679611651,
"repo_name": "uda/djaccount",
"id": "2c9d445f62657f6cc4814d092651e63526c97b03",
"size": "1030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "account/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2035"
},
{
"name": "Python",
"bytes": "14781"
}
],
"symlink_target": ""
}
|
"""Generators for custom listops tasks."""
import csv
import random
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
flags.DEFINE_string(
'task', default='basic',
help='Name of task to create.')
flags.DEFINE_integer(
'num_train_samples', default=96000,
help=('Number of train samples.'))
flags.DEFINE_integer(
'num_valid_samples', default=2000,
help=('Number of test samples.'))
flags.DEFINE_integer(
'num_test_samples', default=2000,
help=('Number of test samples.'))
flags.DEFINE_integer(
'max_depth', default=10,
help=('maximum tree depth of training sequences.'))
flags.DEFINE_integer(
'max_args', default=10,
help=('maximum number of arguments per operator in training sequences.'))
flags.DEFINE_integer(
'max_length', default=2000,
help=('maximum length per sequence in training sequences.'))
flags.DEFINE_integer(
'min_length', default=500,
help=('minimum length per sequence in training sequences.'))
flags.DEFINE_string(
'output_dir', default='output_dir',
help='Directory to output files.')
FLAGS = flags.FLAGS
MIN = '[MIN'
MAX = '[MAX'
MED = '[MED'
FIRST = '[FIRST'
LAST = '[LAST'
SUM_MOD = '[SM'
END = ']'
OPERATORS = [MIN, MAX, MED, SUM_MOD] # , FIRST, LAST]
VALUES = range(10)
VALUE_P = 0.25
def generate_tree(depth, max_depth, max_args):
"""Generate tree-like equations.
Args:
depth: current depth of the node, int.
max_depth: maximum depth of the tree, int.
max_args: maximum number of arguments per operator, int.
Returns:
The root node of a tree structure.
"""
if depth < max_depth:
r = random.random()
else:
r = 1
if r > VALUE_P:
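    # emit a leaf (a single digit); this always happens once max_depth is reached since r is forced to 1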
value = random.choice(VALUES)
return value, 1
else:
length = 2
num_values = random.randint(2, max_args)
values = []
for _ in range(num_values):
sub_t, sub_l = generate_tree(depth + 1, max_depth, max_args)
values.append(sub_t)
length += sub_l
op = random.choice(OPERATORS)
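    # fold operator and arguments into a left-nested tuple: ((((op, v0), v1), ...), END)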
t = (op, values[0])
for value in values[1:]:
t = (t, value)
t = (t, END)
return t, length
def to_string(t, parens=True):
if isinstance(t, str):
return t
elif isinstance(t, int):
return str(t)
  else:
    if parens:
      return '( ' + to_string(t[0]) + ' ' + to_string(t[1]) + ' )'
    else:
      return to_string(t[0]) + ' ' + to_string(t[1])
def to_value(t):
"""Compute the output of equation t.
Args:
t: a tree structure that represents equation t, list.
Returns:
The result of equation t, int.
"""
if not isinstance(t, tuple):
return t
l = to_value(t[0])
r = to_value(t[1])
if l in OPERATORS: # Create an unsaturated function.
return (l, [r])
elif r == END: # l must be an unsaturated function.
if l[0] == MIN:
return min(l[1])
elif l[0] == MAX:
return max(l[1])
elif l[0] == FIRST:
return l[1][0]
elif l[0] == LAST:
return l[1][-1]
elif l[0] == MED:
return int(np.median(l[1]))
elif l[0] == SUM_MOD:
return np.sum(l[1]) % 10
elif isinstance(l, tuple):
# We've hit an unsaturated function and an argument.
return (l[0], l[1] + [r])
def write_to_file(data, fp):
"""Write to file output."""
tf.logging.info(type(data))
tf.logging.info('Writing {} samples to {}'.format(len(data), fp + '.tsv'))
with tf.io.gfile.GFile(fp + '.tsv', 'w+') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerow(['Source', 'Target'])
writer.writerows(data)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tf.logging.info('Start dataset construction')
data = set()
num_samples = FLAGS.num_train_samples \
+ FLAGS.num_test_samples + FLAGS.num_valid_samples
while len(data) < num_samples:
tree, length = generate_tree(1, FLAGS.max_depth, FLAGS.max_args)
if length > FLAGS.min_length and length < FLAGS.max_length:
data.add(tree)
if len(data) % 1000 == 0:
tf.logging.info('Processed {}'.format(len(data)))
print('Processed {}'.format(len(data)))
train = []
for example in data:
train.append([to_string(example), to_value(example)])
tf.logging.info('Finished running dataset construction')
val = train[FLAGS.num_train_samples:]
test = val[FLAGS.num_valid_samples:]
val = val[:FLAGS.num_valid_samples]
train = train[:FLAGS.num_train_samples]
tf.logging.info('Dataset size: %d/%d/%d' % (len(train), len(val), len(test)))
write_to_file(train, FLAGS.output_dir + '/{}_train'.format(FLAGS.task))
write_to_file(val, FLAGS.output_dir + '/{}_val'.format(FLAGS.task))
write_to_file(test, FLAGS.output_dir + '/{}_test'.format(FLAGS.task))
tf.logging.info('Finished writing all to file')
if __name__ == '__main__':
app.run(main)
|
{
"content_hash": "7129e8c51e0adfdce6a375b0e144160b",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 79,
"avg_line_length": 26.57777777777778,
"alnum_prop": 0.6295986622073578,
"repo_name": "google-research/long-range-arena",
"id": "bfcc53d7b545c79ac1e6866d1206e5187ea0c2b8",
"size": "5356",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "lra_benchmarks/data/listops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "507289"
}
],
"symlink_target": ""
}
|
def set_prefs(prefs):
"""This function is called before opening the project"""
# Specify which files and folders to ignore in the project.
# Changes to ignored resources are not added to the history and
# VCSs. Also they are not returned in `Project.get_files()`.
# Note that ``?`` and ``*`` match all characters but slashes.
# '*.pyc': matches 'test.pyc' and 'pkg/test.pyc'
# 'mod*.pyc': matches 'test/mod1.pyc' but not 'mod/1.pyc'
# '.svn': matches 'pkg/.svn' and all of its children
# 'build/*.o': matches 'build/lib.o' but not 'build/sub/lib.o'
# 'build//*.o': matches 'build/lib.o' and 'build/sub/lib.o'
prefs['ignored_resources'] = ['*.pyc', '*~', '.ropeproject',
'.hg', '.svn', '_svn', '.git']
# Specifies which files should be considered python files. It is
# useful when you have scripts inside your project. Only files
# ending with ``.py`` are considered to be python files by
# default.
#prefs['python_files'] = ['*.py']
# Custom source folders: By default rope searches the project
# for finding source folders (folders that should be searched
# for finding modules). You can add paths to that list. Note
# that rope guesses project source folders correctly most of the
# time; use this if you have any problems.
# The folders should be relative to project root and use '/' for
# separating folders regardless of the platform rope is running on.
# 'src/my_source_folder' for instance.
#prefs.add('source_folders', 'src')
# You can extend python path for looking up modules
#prefs.add('python_path', '~/python/')
prefs.add('source_folders', '/usr/local/google_appengine')
# You can extend python path for looking up modules
prefs.add('python_path', '/usr/local/google_appengine/')
# Should rope save object information or not.
prefs['save_objectdb'] = True
prefs['compress_objectdb'] = False
# If `True`, rope analyzes each module when it is being saved.
prefs['automatic_soa'] = True
# The depth of calls to follow in static object analysis
prefs['soa_followed_calls'] = 0
# If `False` when running modules or unit tests "dynamic object
# analysis" is turned off. This makes them much faster.
prefs['perform_doa'] = True
# Rope can check the validity of its object DB when running.
prefs['validate_objectdb'] = True
# How many undos to hold?
prefs['max_history_items'] = 32
# Shows whether to save history across sessions.
prefs['save_history'] = True
prefs['compress_history'] = False
# Set the number spaces used for indenting. According to
# :PEP:`8`, it is best to use 4 spaces. Since most of rope's
# unit-tests use 4 spaces it is more reliable, too.
prefs['indent_size'] = 4
# Builtin and c-extension modules that are allowed to be imported
# and inspected by rope.
prefs['extension_modules'] = []
# Add all standard c-extensions to extension_modules list.
prefs['import_dynload_stdmods'] = True
# If `True` modules with syntax errors are considered to be empty.
# The default value is `False`; When `False` syntax errors raise
# `rope.base.exceptions.ModuleSyntaxError` exception.
prefs['ignore_syntax_errors'] = False
# If `True`, rope ignores unresolvable imports. Otherwise, they
# appear in the importing namespace.
prefs['ignore_bad_imports'] = False
def project_opened(project):
"""This function is called after opening the project"""
# Do whatever you like here!
|
{
"content_hash": "482aa0f2ecdb47a91705a8b210a7a773",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 71,
"avg_line_length": 41.52873563218391,
"alnum_prop": 0.6639911430943815,
"repo_name": "greensnark/appenginedatastoretest",
"id": "9cc2504607e4871685397001d4c6219578cf8d6d",
"size": "3643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py-load/.ropeproject/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "1005"
},
{
"name": "Java",
"bytes": "12374"
},
{
"name": "Python",
"bytes": "12730"
}
],
"symlink_target": ""
}
|
import logging
from flask import Blueprint, request, abort
from .tasks import load_fdp_task
from .config import get_connection_string, _set_connection_string
FDPLoaderBlueprint = Blueprint('FDPLoader', __name__)
@FDPLoaderBlueprint.route('/')
def load():
package = request.args.get('package')
callback = request.args.get('callback')
logging.info('Requested load of "%s" with callback "%s"\n' % (package, callback))
if package is not None and callback is not None:
load_fdp_task.delay(package, callback, get_connection_string())
return ""
else:
abort(400)
def configure_loader_api(connection_string=None):
if connection_string is not None:
_set_connection_string(connection_string)
return FDPLoaderBlueprint
|
{
"content_hash": "0f266ce59f3219eb44f51ce58154c32d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 85,
"avg_line_length": 31,
"alnum_prop": 0.704516129032258,
"repo_name": "openspending/babbage.fiscal-data-package",
"id": "8b396bee03e117f36822584e43295320209dd1a4",
"size": "775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "babbage_fiscal/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37953"
}
],
"symlink_target": ""
}
|
from System.Threading import *
def condition(switchTime):
switchCount = 1
#loop forever
while switchCount > 0:
hc.SwitchEAndWait()
print "Switch count "+str(switchCount)
        Thread.Sleep(1000 * switchTime)
switchCount = switchCount + 1
def run_script():
print "Use condition(switchTime)"
|
{
"content_hash": "7547652003ed430f15aaec220be9f4c3",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 50,
"avg_line_length": 19.9375,
"alnum_prop": 0.7366771159874608,
"repo_name": "ColdMatter/EDMSuite",
"id": "feb7171e06d51547f60b3ba8c16dde464f1085d1",
"size": "319",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "EDMScripts/OldScripts/SimpleCondition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2489"
},
{
"name": "C#",
"bytes": "6547131"
},
{
"name": "F#",
"bytes": "1565"
},
{
"name": "Forth",
"bytes": "767"
},
{
"name": "HTML",
"bytes": "241926"
},
{
"name": "Mathematica",
"bytes": "452861"
},
{
"name": "Python",
"bytes": "798129"
},
{
"name": "Shell",
"bytes": "33"
},
{
"name": "TSQL",
"bytes": "1768"
},
{
"name": "TeX",
"bytes": "8393"
}
],
"symlink_target": ""
}
|
"""
@author: Adam Reinhold Von Fisher - https://www.linkedin.com/in/adamrvfisher/
"""
#This is part of a kth fold optimization tool
#Import modules
from DefRSIPredictor import DefRSIPredictor
import numpy as np
import pandas as pd
from YahooGrabber import YahooGrabber
#Read in data
Aggregate = pd.read_pickle('TLTAGGSHARPE023')
Aggregate = Aggregate.loc[:,~Aggregate.columns.duplicated()]
#Assign ticker
ticker = 'TMF'
#Request/read in data
s = YahooGrabber(ticker)
s1 = pd.read_pickle('TLTAGGAdvice07_50') # this is just for testing with a graph
#Transfer column
s['Advice'] = s1['Advice']
#Iterable
#ranger = range(1,len(s)+1)
#Creating recurrent incremental data ranges
#dictionary = { r : s.loc[s.index[:r],:] for r in ranger}
#triumph = []
#for r in ranger:
# q = dictionary[r]
# result = DefRSIPredictor(Aggregate, q)
# triumph.append(result)
# print(r)
# print(result)
#TheAdvice = pd.Series(triumph, index=s.index)
#s['Advice'] = TheAdvice
#Calculate log returns
s['LogRet'] = np.log(s['Adj Close']/s['Adj Close'].shift(1))
s['LogRet'] = s['LogRet'].fillna(0)
#Directional methodology
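#Long when the predictor clears the upper threshold, short below the lower threshold, flat otherwise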
s['Regime'] = np.where(s['Advice'] > .706102, 1, 0)
s['Regime'] = np.where(s['Advice'] < -.644197, -1, s['Regime'])
#Apply returns to direction
s['Strategy'] = (s['Regime']).shift(1)*s['LogRet']
s['Strategy'] = s['Strategy'].fillna(0)
#Variable assignment
endgains = 1
endreturns = 1
#Performance metric
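#Mean strategy return less the absolute mean log return of the underlying, scaled by strategy volatility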
sharpe = (s['Strategy'].mean()-abs(s['LogRet'].mean()))/s['Strategy'].std()
#Calculate returns
for g in s['LogRet']:
slate = endreturns * (1+-g)
endreturns = slate
#Calculate returns
for h in s['Strategy']:
otherslate = endgains * (1+h)
endgains = otherslate
#For increased accuracy, remove first window values from TheAdvice
s[['LogRet', 'Strategy']].cumsum().apply(np.exp).plot(grid = True,
figsize = (8,5))
#Print metrics
print(s)
print(sharpe)
print(endreturns)
print(endgains)
|
{
"content_hash": "dbc86fb2752dcea042ff72f19a711e5b",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 80,
"avg_line_length": 29.536231884057973,
"alnum_prop": 0.654072620215898,
"repo_name": "adamrvfisher/TechnicalAnalysisLibrary",
"id": "11b576339ee019da027aabe268c8d763e0541058",
"size": "2063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "KthFold+Bonds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15514"
}
],
"symlink_target": ""
}
|
"""
Example Airflow DAG for DataprocSubmitJobOperator with a Hive job.
"""
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.operators.dataproc import (
DataprocCreateClusterOperator,
DataprocDeleteClusterOperator,
DataprocSubmitJobOperator,
)
from airflow.utils.trigger_rule import TriggerRule
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
DAG_ID = "dataproc_hive"
PROJECT_ID = os.environ.get("SYSTEM_TESTS_GCP_PROJECT", "")
CLUSTER_NAME = f"cluster-dataproc-hive-{ENV_ID}"
REGION = "europe-west1"
ZONE = "europe-west1-b"
# Cluster definition
# [START how_to_cloud_dataproc_create_cluster]
CLUSTER_CONFIG = {
"master_config": {
"num_instances": 1,
"machine_type_uri": "n1-standard-4",
"disk_config": {"boot_disk_type": "pd-standard", "boot_disk_size_gb": 1024},
},
"worker_config": {
"num_instances": 2,
"machine_type_uri": "n1-standard-4",
"disk_config": {"boot_disk_type": "pd-standard", "boot_disk_size_gb": 1024},
},
}
# [END how_to_cloud_dataproc_create_cluster]
TIMEOUT = {"seconds": 1 * 24 * 60 * 60}
# [START how_to_cloud_dataproc_hive_config]
HIVE_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"hive_job": {"query_list": {"queries": ["SHOW DATABASES;"]}},
}
# [END how_to_cloud_dataproc_hive_config]
with models.DAG(
DAG_ID,
schedule_interval='@once',
start_date=datetime(2021, 1, 1),
catchup=False,
tags=["example", "dataproc"],
) as dag:
# [START how_to_cloud_dataproc_create_cluster_operator]
create_cluster = DataprocCreateClusterOperator(
task_id="create_cluster",
project_id=PROJECT_ID,
cluster_config=CLUSTER_CONFIG,
region=REGION,
cluster_name=CLUSTER_NAME,
)
# [END how_to_cloud_dataproc_create_cluster_operator]
hive_task = DataprocSubmitJobOperator(
task_id="hive_task", job=HIVE_JOB, region=REGION, project_id=PROJECT_ID
)
# [START how_to_cloud_dataproc_delete_cluster_operator]
delete_cluster = DataprocDeleteClusterOperator(
task_id="delete_cluster",
project_id=PROJECT_ID,
cluster_name=CLUSTER_NAME,
region=REGION,
)
# [END how_to_cloud_dataproc_delete_cluster_operator]
delete_cluster.trigger_rule = TriggerRule.ALL_DONE
create_cluster >> hive_task >> delete_cluster
from tests.system.utils.watcher import watcher
# This test needs watcher in order to properly mark success/failure
# when "teardown" task with trigger rule is part of the DAG
list(dag.tasks) >> watcher()
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
|
{
"content_hash": "30be90e6bc45ca0b7c39fdacd373fcaf",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 88,
"avg_line_length": 29.329896907216494,
"alnum_prop": 0.6734622144112478,
"repo_name": "Acehaidrey/incubator-airflow",
"id": "bf8f3178a7be94aafc65ecdb2a9f6cc203eac8d0",
"size": "3632",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/system/providers/google/cloud/dataproc/example_dataproc_hive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21727510"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495253"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
}
|
from oslo_log import log as logging
from trove.common.i18n import _
from trove.common import instance as ds_instance
from trove.common.notification import EndNotification
from trove.guestagent import backup
from trove.guestagent.datastore.experimental.db2 import service
from trove.guestagent.datastore import manager
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
class Manager(manager.Manager):
"""
    This is the DB2 Manager class. It is dynamically loaded
    based on the datastore of the Trove instance.
"""
def __init__(self):
self.appStatus = service.DB2AppStatus()
self.app = service.DB2App(self.appStatus)
self.admin = service.DB2Admin()
super(Manager, self).__init__('db2')
@property
def status(self):
return self.appStatus
def do_prepare(self, context, packages, databases, memory_mb, users,
device_path, mount_point, backup_info,
config_contents, root_password, overrides,
cluster_config, snapshot):
"""This is called from prepare in the base class."""
if device_path:
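            # format and mount the attached data volume before starting DB2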
device = volume.VolumeDevice(device_path)
device.unmount_device(device_path)
device.format()
device.mount(mount_point)
LOG.debug('Mounted the volume.')
self.app.update_hostname()
self.app.change_ownership(mount_point)
self.app.start_db()
if backup_info:
self._perform_restore(backup_info, context, mount_point)
def restart(self, context):
"""
Restart this DB2 instance.
This method is called when the guest agent
gets a restart message from the taskmanager.
"""
LOG.debug("Restart a DB2 server instance.")
self.app.restart()
def stop_db(self, context, do_not_start_on_reboot=False):
"""
Stop this DB2 instance.
This method is called when the guest agent
gets a stop message from the taskmanager.
"""
LOG.debug("Stop a given DB2 server instance.")
self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
def create_database(self, context, databases):
LOG.debug("Creating database(s)." % databases)
with EndNotification(context):
self.admin.create_database(databases)
def delete_database(self, context, database):
LOG.debug("Deleting database %s." % database)
with EndNotification(context):
return self.admin.delete_database(database)
def list_databases(self, context, limit=None, marker=None,
include_marker=False):
LOG.debug("Listing all databases.")
return self.admin.list_databases(limit, marker, include_marker)
def create_user(self, context, users):
LOG.debug("Create user(s).")
with EndNotification(context):
self.admin.create_user(users)
def delete_user(self, context, user):
LOG.debug("Delete a user %s." % user)
with EndNotification(context):
self.admin.delete_user(user)
def get_user(self, context, username, hostname):
LOG.debug("Show details of user %s." % username)
return self.admin.get_user(username, hostname)
def list_users(self, context, limit=None, marker=None,
include_marker=False):
LOG.debug("List all users.")
return self.admin.list_users(limit, marker, include_marker)
def list_access(self, context, username, hostname):
LOG.debug("List all the databases the user has access to.")
return self.admin.list_access(username, hostname)
def start_db_with_conf_changes(self, context, config_contents):
LOG.debug("Starting DB2 with configuration changes.")
self.app.start_db_with_conf_changes(config_contents)
def _perform_restore(self, backup_info, context, restore_location):
LOG.info(_("Restoring database from backup %s.") % backup_info['id'])
try:
backup.restore(context, backup_info, restore_location)
except Exception:
LOG.exception(_("Error performing restore from backup %s.") %
backup_info['id'])
self.status.set_status(ds_instance.ServiceStatuses.FAILED)
raise
LOG.info(_("Restored database successfully."))
def create_backup(self, context, backup_info):
LOG.debug("Creating backup.")
backup.backup(context, backup_info)
|
{
"content_hash": "1c550b1b5e0a83cbe00e3524c6b29c13",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 77,
"avg_line_length": 37.85,
"alnum_prop": 0.6393659180977543,
"repo_name": "mmasaki/trove",
"id": "792cd90beec8c53fb0e3313106c4eb0c297cdb07",
"size": "5134",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "trove/guestagent/datastore/experimental/db2/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "88"
},
{
"name": "CSS",
"bytes": "21914"
},
{
"name": "JavaScript",
"bytes": "60539"
},
{
"name": "Python",
"bytes": "4204079"
},
{
"name": "Shell",
"bytes": "19186"
},
{
"name": "XSLT",
"bytes": "50542"
}
],
"symlink_target": ""
}
|
"""Test reproduce state for Fan."""
from homeassistant.core import State
from tests.common import async_mock_service
async def test_reproducing_states(hass, caplog):
"""Test reproducing Fan states."""
hass.states.async_set("fan.entity_off", "off", {})
hass.states.async_set("fan.entity_on", "on", {})
hass.states.async_set("fan.entity_speed", "on", {"speed": "high"})
hass.states.async_set("fan.entity_oscillating", "on", {"oscillating": True})
hass.states.async_set("fan.entity_direction", "on", {"direction": "forward"})
turn_on_calls = async_mock_service(hass, "fan", "turn_on")
turn_off_calls = async_mock_service(hass, "fan", "turn_off")
set_direction_calls = async_mock_service(hass, "fan", "set_direction")
oscillate_calls = async_mock_service(hass, "fan", "oscillate")
set_speed_calls = async_mock_service(hass, "fan", "set_speed")
# These calls should do nothing as entities already in desired state
await hass.helpers.state.async_reproduce_state(
[
State("fan.entity_off", "off"),
State("fan.entity_on", "on"),
State("fan.entity_speed", "on", {"speed": "high"}),
State("fan.entity_oscillating", "on", {"oscillating": True}),
State("fan.entity_direction", "on", {"direction": "forward"}),
],
blocking=True,
)
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
assert len(set_direction_calls) == 0
assert len(oscillate_calls) == 0
assert len(set_speed_calls) == 0
# Test invalid state is handled
await hass.helpers.state.async_reproduce_state(
[State("fan.entity_off", "not_supported")], blocking=True
)
assert "not_supported" in caplog.text
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
assert len(set_direction_calls) == 0
assert len(oscillate_calls) == 0
assert len(set_speed_calls) == 0
# Make sure correct services are called
await hass.helpers.state.async_reproduce_state(
[
State("fan.entity_on", "off"),
State("fan.entity_off", "on"),
State("fan.entity_speed", "on", {"speed": "low"}),
State("fan.entity_oscillating", "on", {"oscillating": False}),
State("fan.entity_direction", "on", {"direction": "reverse"}),
# Should not raise
State("fan.non_existing", "on"),
],
blocking=True,
)
assert len(turn_on_calls) == 1
assert turn_on_calls[0].domain == "fan"
assert turn_on_calls[0].data == {"entity_id": "fan.entity_off"}
assert len(set_direction_calls) == 1
assert set_direction_calls[0].domain == "fan"
assert set_direction_calls[0].data == {
"entity_id": "fan.entity_direction",
"direction": "reverse",
}
assert len(oscillate_calls) == 1
assert oscillate_calls[0].domain == "fan"
assert oscillate_calls[0].data == {
"entity_id": "fan.entity_oscillating",
"oscillating": False,
}
assert len(set_speed_calls) == 1
assert set_speed_calls[0].domain == "fan"
assert set_speed_calls[0].data == {"entity_id": "fan.entity_speed", "speed": "low"}
assert len(turn_off_calls) == 1
assert turn_off_calls[0].domain == "fan"
assert turn_off_calls[0].data == {"entity_id": "fan.entity_on"}
|
{
"content_hash": "951205d7e91affd269c033f97528c9a1",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 87,
"avg_line_length": 37.73033707865169,
"alnum_prop": 0.6063132817153067,
"repo_name": "joopert/home-assistant",
"id": "0dcd38580b8c780a3eba4db5f15ffdd51a73a43a",
"size": "3358",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/components/fan/test_reproduce_state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18670593"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|