text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""
Uses Scikit-Learn to compute a best fit function, then draws it in the plot.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_squared_error as mse
from operator import itemgetter
from yellowbrick.style.palettes import LINE_COLOR
from yellowbrick.exceptions import YellowbrickValueError
##########################################################################
## Module Constants
##########################################################################
# Names of the various estimator functions
LINEAR = 'linear'            # fit_linear: OLS regression
QUADRATIC = 'quadratic'      # fit_quadratic: OLS on degree-2 polynomial features
EXPONENTIAL = 'exponential'  # fit_exponential: not implemented yet
LOG = 'log'                  # fit_log: not implemented yet
SELECT_BEST = 'select_best'  # fit_select_best: minimum-MSE choice among the fits
##########################################################################
## Draw Line of Best Fit
##########################################################################
def draw_best_fit(X, y, ax, estimator='linear', **kwargs):
    """
    Fits a model to X and y with Scikit-Learn, then draws the fitted curve
    across the current x-limits of the given matplotlib axes.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        A matrix of n instances with m features.

    y : ndarray or Series of length n
        An array or series of target or class values.

    ax : matplotlib Axes, default: None
        The axes to draw the line on; the current axes are used (or
        generated if required) when None is passed in.

    estimator : string, default: 'linear'
        One of 'linear', 'quadratic', 'exponential', 'log', or
        'select_best'. The 'select_best' strategy picks the fit with the
        minimum MSE; 'exponential' and 'log' are not implemented yet.

    kwargs : dict
        Styling arguments forwarded to ``ax.plot``; the standard line color
        is applied unless a color keyword is supplied.

    Returns
    -------
    ax : matplotlib Axes
        The axes with the line drawn on it.
    """
    # Map each estimator name to the function that performs the fit.
    fitters = {
        LINEAR: fit_linear,            # Uses OLS to fit the regression
        QUADRATIC: fit_quadratic,      # Uses OLS with Polynomial order 2
        EXPONENTIAL: fit_exponential,  # Not implemented yet
        LOG: fit_log,                  # Not implemented yet
        SELECT_BEST: fit_select_best,  # Selects the best fit via MSE
    }

    # Reject unknown estimator names before doing any work.
    if estimator not in fitters:
        raise YellowbrickValueError(
            "'{}' not a valid type of estimator; choose from {}".format(
                estimator, ", ".join(fitters.keys())
            )
        )
    fit = fitters[estimator]

    # X and y must be paired observations of identical length.
    if len(X) != len(y):
        raise YellowbrickValueError((
            "X and y must have same length:"
            " X len {} doesn't match y len {}!"
        ).format(len(X), len(y)))

    # Coerce the inputs into numpy arrays for the estimators.
    X, y = np.array(X), np.array(y)

    # Scikit-Learn estimators require X shaped (n, 1); promote 1D input and
    # reject anything with more than two dimensions.
    if X.ndim < 2:
        X = X.reshape(-1, 1)
    if X.ndim > 2:
        raise YellowbrickValueError(
            "X must be a (1,) or (n,1) dimensional array not {}".format(X.shape)
        )

    # y must remain a flat (n,) target vector.
    if y.ndim > 1:
        raise YellowbrickValueError(
            "y must be a (1,) dimensional array not {}".format(y.shape)
        )

    # Fit the model on the validated data.
    model = fit(X, y)

    # Apply the default line color unless the caller chose one.
    if 'c' not in kwargs and 'color' not in kwargs:
        kwargs['color'] = LINE_COLOR

    # Fall back to the current working axes when none were supplied.
    if ax is None:
        ax = plt.gca()

    # Render the model's predictions across the axes' x-range.
    # TODO: determine if xlim or X.min(), X.max() are better params
    xmin, xmax = ax.get_xlim()
    xs = np.linspace(xmin, xmax, num=100)
    ax.plot(xs, model.predict(xs.reshape(-1, 1)), **kwargs)

    return ax
##########################################################################
## Estimator Functions
##########################################################################
def fit_select_best(X, y):
    """
    Trains every implemented estimator on (X, y) and returns the fitted
    model whose predictions on the training data have the smallest mean
    squared error.
    """
    candidates = [fitter(X, y) for fitter in (fit_linear, fit_quadratic)]
    # Pair each candidate with its training MSE, then keep the best one.
    scored = [(mse(y, candidate.predict(X)), candidate) for candidate in candidates]
    return min(scored, key=itemgetter(0))[1]
def fit_linear(X, y):
    """
    Fits and returns an ordinary least squares regression on (X, y).
    """
    # LinearRegression.fit returns the estimator itself, already trained.
    return linear_model.LinearRegression().fit(X, y)
def fit_quadratic(X, y):
    """
    Fits and returns a second-order polynomial regression: the features are
    expanded to degree-2 polynomial terms before the OLS fit.
    """
    pipeline = make_pipeline(
        PolynomialFeatures(2), linear_model.LinearRegression()
    )
    # Pipeline.fit returns the pipeline itself, already trained.
    return pipeline.fit(X, y)
def fit_exponential(X, y):
    """
    Placeholder for fitting an exponential curve to the data; currently
    unsupported and always raises.
    """
    raise NotImplementedError("Exponential best fit lines are not implemented")
def fit_log(X, y):
    """
    Placeholder for fitting a logarithmic curve to the data.

    Raises
    ------
    NotImplementedError
        Always; logarithmic best fit lines are not implemented yet.
    """
    # Fixed typo in the user-facing message: "Logrithmic" -> "Logarithmic".
    raise NotImplementedError("Logarithmic best fit lines are not implemented")
##########################################################################
## Draw 45 Degree Line
##########################################################################
def draw_identity_line(ax=None, dynamic=True, **kwargs):
    """
    Draws the 45 degree y=x identity line across the overlap of the axes'
    x and y limits. When ``dynamic`` is True, callbacks are registered so
    the line stays correct as the limits change.

    Parameters
    ----------
    ax : matplotlib Axes, default: None
        The axes to plot the figure on. If None is passed in the current
        axes will be used (or generated if required).

    dynamic : bool, default : True
        Registers limit-change callbacks that update the identity line as
        the axes are modified.

    kwargs : dict
        Styling arguments forwarded to the matplotlib plot function.

    Returns
    -------
    ax : matplotlib Axes
        The axes with the line drawn on it.

    Notes
    -----
    .. seealso:: `StackOverflow discussion: Does matplotlib have a function for drawing diagonal lines in axis coordinates? <https://stackoverflow.com/questions/22104256/does-matplotlib-have-a-function-for-drawing-diagonal-lines-in-axis-coordinates>`_
    """
    # Fall back to the current working axes when none were supplied.
    if ax is None:
        ax = plt.gca()

    # Apply the standard line color and a translucent stroke unless the
    # caller styled the line explicitly.
    if 'c' not in kwargs and 'color' not in kwargs:
        kwargs['color'] = LINE_COLOR
    kwargs.setdefault('alpha', 0.5)

    # Start with an empty line; its data is filled in by the callback.
    identity, = ax.plot([], [], **kwargs)

    def on_limits_changed(ax):
        # The identity line only spans where the x and y ranges overlap.
        xlim, ylim = ax.get_xlim(), ax.get_ylim()
        lo = max(xlim[0], ylim[0])
        hi = min(xlim[1], ylim[1])
        identity.set_data((lo, hi), (lo, hi))

    # Draw once immediately, then keep the line in sync on limit changes.
    on_limits_changed(ax)
    if dynamic:
        ax.callbacks.connect('xlim_changed', on_limits_changed)
        ax.callbacks.connect('ylim_changed', on_limits_changed)

    return ax
if __name__ == '__main__':
    import os
    import pandas as pd

    # Smoke test against the bundled concrete dataset.
    path = os.path.join(os.path.dirname(__file__), "..", "examples", "data", "concrete.xls")
    if not os.path.exists(path):
        raise Exception("Could not find path for testing")

    xkey = 'Fine Aggregate (component 7)(kg in a m^3 mixture)'
    ykey = 'Coarse Aggregate (component 6)(kg in a m^3 mixture)'
    data = pd.read_excel(path)

    # Scatter the two aggregate measures and overlay the best fit line.
    fig, ax = plt.subplots()
    ax.scatter(data[xkey], data[ykey])
    draw_best_fit(data[xkey], data[ykey], ax, 'select_best')
    plt.show()
|
{
"content_hash": "e2cabf7946b9008a28f312c6447d021d",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 251,
"avg_line_length": 31.775086505190313,
"alnum_prop": 0.587063051290428,
"repo_name": "pdamodaran/yellowbrick",
"id": "32b401fa87822b02b4d821168c74ea5f3155f445",
"size": "9531",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "yellowbrick/bestfit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1082"
},
{
"name": "Python",
"bytes": "1218356"
},
{
"name": "TeX",
"bytes": "3743"
}
],
"symlink_target": ""
}
|
import re
from wlauto import LinuxDevice, Parameter
from wlauto.exceptions import DeviceError
from wlauto.core.device import RuntimeParameter
from wlauto.utils.misc import convert_new_lines
from wlauto.utils.types import boolean
class ChromeOsDevice(LinuxDevice):
    name = "chromeos_test_image"
    description = """
    Chrome OS test image device. Use this if you are working on a Chrome OS device with a test
    image. An off the shelf device will not work with this device interface.
    More information on how to build a Chrome OS test image can be found here:
    https://www.chromium.org/chromium-os/developer-guide#TOC-Build-a-disk-image-for-your-board
    """
    platform = 'chromeos'
    abi = 'armeabi'
    has_gpu = True
    default_timeout = 100
    # Override the LinuxDevice defaults with values appropriate for a Chrome
    # OS test image (root login, /usr/local/bin for deployed binaries, etc.).
    parameters = [
        Parameter('core_names', default=[], override=True),
        Parameter('core_clusters', default=[], override=True),
        Parameter('username', default='root', override=True),
        Parameter('password_prompt', default='Password:', override=True),
        Parameter('binaries_directory', default='/usr/local/bin', override=True),
        Parameter('working_directory', default='/home/root/wa-working', override=True),
    ]
    # Expose the UI state as a runtime parameter so it can be queried or set
    # through get_ui_status/set_ui_status.
    runtime_parameters = [
        RuntimeParameter('ui', 'get_ui_status', 'set_ui_status', value_name='status'),
    ]
    def __init__(self, **kwargs):
        super(ChromeOsDevice, self).__init__(**kwargs)
        # None means "unknown/untouched"; set_ui_status records True/False.
        self.ui_status = None
    def validate(self):
        # pylint: disable=access-member-before-definition,attribute-defined-outside-init
        # Fall back to the well-known default password when the user
        # configured neither a password nor a key file.
        if self.password is None and not self.keyfile:
            self.password = 'test0000'
    def initialize(self, context, *args, **kwargs):
        """Replace a pre-installed busybox with the WA-deployed version."""
        if self.busybox == 'busybox':
            self.logger.debug('Busybox already installed on the device: replacing with wa version')
            self.uninstall('busybox')
            self.busybox = self.deploy_busybox(context)
    def get_ui_status(self):
        """Return the last UI status set via set_ui_status (None if unset)."""
        return self.ui_status
    def set_ui_status(self, status):
        """Run 'start ui' or 'stop ui' on the device per the boolean status."""
        self.ui_status = boolean(status)
        if self.ui_status is None:
            pass
        elif self.ui_status:
            try:
                self.execute('start ui')
            except DeviceError:
                # Best effort: the command may fail if the UI is already up.
                pass
        else:
            try:
                self.execute('stop ui')
            except DeviceError:
                # Best effort: the command may fail if the UI is already down.
                pass
    def stop(self):
        """Restore the UI if this run had stopped it, then reset the state."""
        if self.ui_status is None:
            pass
        elif not self.ui_status:
            try:
                self.execute('start ui')
            except DeviceError:
                pass
        else:
            pass
        self.ui_status = None
|
{
"content_hash": "36c94658320d27d7773f6277ef62aae0",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 99,
"avg_line_length": 32.11904761904762,
"alnum_prop": 0.6137879911045219,
"repo_name": "bjackman/workload-automation",
"id": "943027a0a29a7e54cb78939c1ad954ef12a8f688",
"size": "3284",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "wlauto/devices/linux/chromeos_test_image/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "40003"
},
{
"name": "HTML",
"bytes": "243720"
},
{
"name": "Java",
"bytes": "226912"
},
{
"name": "JavaScript",
"bytes": "6578"
},
{
"name": "Jupyter Notebook",
"bytes": "1322"
},
{
"name": "Makefile",
"bytes": "430"
},
{
"name": "Python",
"bytes": "1555462"
},
{
"name": "Shell",
"bytes": "39222"
},
{
"name": "Vim script",
"bytes": "901"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class AccessKeys(Model):
    """Namespace/Relay connection string and key material.

    :param primary_connection_string: Primary connection string of the created
     namespace authorization rule.
    :type primary_connection_string: str
    :param secondary_connection_string: Secondary connection string of the
     created namespace authorization rule.
    :type secondary_connection_string: str
    :param primary_key: A base64-encoded 256-bit primary key for signing and
     validating the SAS token.
    :type primary_key: str
    :param secondary_key: A base64-encoded 256-bit secondary key for signing
     and validating the SAS token.
    :type secondary_key: str
    :param key_name: A string that describes the authorization rule.
    :type key_name: str
    """

    # Maps attribute names to their wire (JSON) keys for msrest serialization.
    _attribute_map = {
        'primary_connection_string': {'key': 'primaryConnectionString', 'type': 'str'},
        'secondary_connection_string': {'key': 'secondaryConnectionString', 'type': 'str'},
        'primary_key': {'key': 'primaryKey', 'type': 'str'},
        'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
        'key_name': {'key': 'keyName', 'type': 'str'},
    }

    def __init__(self, primary_connection_string=None, secondary_connection_string=None, primary_key=None, secondary_key=None, key_name=None):
        # Plain attribute storage; (de)serialization is driven by _attribute_map.
        self.key_name = key_name
        self.primary_key = primary_key
        self.secondary_key = secondary_key
        self.primary_connection_string = primary_connection_string
        self.secondary_connection_string = secondary_connection_string
|
{
"content_hash": "3a5dee13ca12539f1ff55415ba287d21",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 142,
"avg_line_length": 44.166666666666664,
"alnum_prop": 0.6855345911949685,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "ad3ee4dcc53d652db8346ee30ebcc56dd6ff9470",
"size": "2064",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-relay/azure/mgmt/relay/models/access_keys.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point for the "flights" project.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "flights.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django is importable, so re-raise the original, more specific error.
        raise
    execute_from_command_line(sys.argv)
|
{
"content_hash": "eea016538b84596c5c166dc2bc634434",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 37.285714285714285,
"alnum_prop": 0.6206896551724138,
"repo_name": "kostya9/KPI.RelationalDatabases",
"id": "07cfaf696b8ab8599cbfa487f608642564ba9d8f",
"size": "805",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Lab3/flights/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "123718"
},
{
"name": "HTML",
"bytes": "1088"
},
{
"name": "JavaScript",
"bytes": "1540891"
},
{
"name": "PLSQL",
"bytes": "241"
},
{
"name": "Python",
"bytes": "65510"
},
{
"name": "SQLPL",
"bytes": "436"
},
{
"name": "Vue",
"bytes": "151714"
}
],
"symlink_target": ""
}
|
from security_monkey import app
from security_monkey.cloudaux_watcher import CloudAuxWatcher
from security_monkey.cloudaux_watcher import CloudAuxChangeItem
from security_monkey.decorators import record_exception
from cloudaux.decorators import iter_account_region
class CloudAuxBatchedWatcher(CloudAuxWatcher):
    # Fetches cloud items in fixed-size batches: slurp_list() builds up the
    # full listing, and each slurp() call walks batched_size items of it.
    def __init__(self, **kwargs):
        super(CloudAuxBatchedWatcher, self).__init__(**kwargs)
        # Number of items fetched per slurp() invocation.
        self.batched_size = 100
        # Set once the listing (or the batch walk) reaches the end.
        self.done_slurping = False
        # NOTE(review): slurp() also reads self.batch_counter and
        # self.total_list, which are presumably initialized by the parent
        # class / prep_for_batch_slurp() -- confirm against CloudAuxWatcher.
    def slurp_list(self):
        """List items for all accounts/regions and append them to total_list."""
        self.prep_for_batch_slurp()
        # Wrap the list call so any exception is recorded into exception_map
        # rather than propagating.
        @record_exception(source='{index}-watcher'.format(index=self.index), pop_exception_fields=True)
        def invoke_list_method(**kwargs):
            return self.list_method(**kwargs['conn_dict'])
        # Fan out over every (account, region) pair for this service.
        @iter_account_region(self.service_name, accounts=self.account_identifiers,
                             regions=self._get_regions(), conn_type='dict')
        def get_item_list(**kwargs):
            kwargs, exception_map = self._add_exception_fields_to_kwargs(**kwargs)
            items = invoke_list_method(**kwargs)
            if not items:
                items = list()
            return items, exception_map
        items, exception_map = self._flatten_iter_response(get_item_list())
        self.total_list.extend(items)
        # An empty listing means there is nothing left to slurp in batches.
        if not items:
            self.done_slurping = True
        return items, exception_map
    def slurp(self):
        """Fetch details for the next batch of up to batched_size items."""
        # Wrap the get call so any exception is recorded into exception_map
        # rather than propagating.
        @record_exception(source='{index}-watcher'.format(index=self.index), pop_exception_fields=True)
        def invoke_get_method(item, **kwargs):
            return self.get_method(item, **kwargs['conn_dict'])
        # We need to embed the region into the item in the total list, hence the "TBD"
        @iter_account_region(self.service_name, accounts=self.account_identifiers, conn_type='dict', regions=["TBD"])
        def slurp_items(**kwargs):
            item_list = list()
            kwargs, exception_map = self._add_exception_fields_to_kwargs(**kwargs)
            # Resume the walk where the previous batch left off.
            item_counter = self.batch_counter * self.batched_size
            skip_counter = 0 # Need to track number of items skipped so that the batches don't overlap
            while self.batched_size - (len(item_list) + skip_counter) > 0 and not self.done_slurping:
                cursor = self.total_list[item_counter]
                item_name = self.get_name_from_list_output(cursor)
                # Skip (but count) items on the ignore list.
                if item_name and self.check_ignore_list(item_name):
                    item_counter += 1
                    skip_counter += 1
                    if item_counter == len(self.total_list):
                        self.done_slurping = True
                    continue
                kwargs["conn_dict"]["region"] = cursor["Region"] # Inject the region in.
                app.logger.debug("Account: {account}, Batched Watcher: {watcher}, Fetching item: "
                                 "{item}/{region}".format(account=kwargs["account_name"],
                                                          watcher=self.index,
                                                          item=item_name,
                                                          region=kwargs["conn_dict"]["region"]))
                item_details = invoke_get_method(cursor, name=item_name, **kwargs)
                if item_details:
                    # Determine which region to record the item into.
                    # Some tech, like IAM, is global and so we record it as 'universal' by setting an override_region
                    # Some tech, like S3, requires an initial connection to us-east-1, though a buckets actual
                    # region may be different. Extract the actual region from item_details.
                    # Otherwise, just use the region where the boto connection was made.
                    record_region = self.override_region or \
                        item_details.get('Region') or kwargs['conn_dict']['region']
                    item = CloudAuxChangeItem.from_item(
                        name=item_name,
                        item=item_details,
                        record_region=record_region,
                        source_watcher=self,
                        **kwargs)
                    item_list.append(item)
                else:
                    # Item not fetched (possibly deleted after grabbing the list) -- have to account in batch
                    skip_counter += 1
                item_counter += 1
                if item_counter == len(self.total_list):
                    self.done_slurping = True
            # Advance so the next slurp() starts at the following batch.
            self.batch_counter += 1
            return item_list, exception_map
        return self._flatten_iter_response(slurp_items())
|
{
"content_hash": "3d78261190303407beb474e9069b6d79",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 117,
"avg_line_length": 48.08163265306123,
"alnum_prop": 0.5600594227504244,
"repo_name": "Netflix/security_monkey",
"id": "a547e8e0e88b4401998e0f1a71a6e143586465fe",
"size": "4712",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "security_monkey/cloudaux_batched_watcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22837"
},
{
"name": "Dart",
"bytes": "130852"
},
{
"name": "Dockerfile",
"bytes": "3841"
},
{
"name": "HTML",
"bytes": "120266"
},
{
"name": "JavaScript",
"bytes": "13728"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1578684"
},
{
"name": "Shell",
"bytes": "30939"
}
],
"symlink_target": ""
}
|
import os
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle
os.environ["CPU_NUM"] = "2"
class TestFetchUnmerged(unittest.TestCase):
    """Checks exe.run fetch results in merged and unmerged forms.

    With return_merged=False each fetched variable keeps a leading
    per-device dimension; with return_merged=True the per-device results
    are merged across devices.
    """
    def conv_net(self, img, label):
        # Two conv+pool stages (with a batch norm in between) followed by two
        # FC layers; returns the mean cross-entropy loss and the predictions.
        conv_pool_1 = fluid.nets.simple_img_conv_pool(
            input=img,
            filter_size=5,
            num_filters=8,
            pool_size=2,
            pool_stride=2,
            pool_type='max',
            act="relu",
        )
        conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
        conv_pool_2 = fluid.nets.simple_img_conv_pool(
            input=conv_pool_1,
            filter_size=5,
            num_filters=16,
            pool_size=2,
            pool_stride=2,
            pool_type='avg',
            act="relu",
        )
        hidden = fluid.layers.fc(input=conv_pool_2, size=32, act='relu')
        prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
        loss = fluid.layers.cross_entropy(input=prediction, label=label)
        avg_loss = paddle.mean(loss)
        return avg_loss, prediction
    def build_program(self, main, startup, is_test):
        # Builds the MNIST conv net inside the given programs; attaches an
        # Adam optimizer unless building for inference/test.
        with fluid.unique_name.guard():
            with fluid.program_guard(main, startup):
                img = fluid.layers.data(
                    name='image', shape=[1, 28, 28], dtype='float32'
                )
                label = fluid.layers.data(
                    name='label', shape=[1], dtype='int64'
                )
                loss, prediction = self.conv_net(img, label)
                if not is_test:
                    opt = fluid.optimizer.Adam(learning_rate=0.001)
                    opt.minimize(loss)
        return [img, label], loss, prediction
    def fetch_unmerged(self, use_cuda=True):
        # Runs a few training iterations twice: first with return_merged=False
        # (expects a leading device dimension on fetched vars), then with
        # return_merged=True (expects device results merged together).
        main_program = fluid.Program()
        startup_program = fluid.Program()
        feeds, loss, prediction = self.build_program(
            main_program, startup_program, False
        )
        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(startup_program)
        build_strategy = fluid.BuildStrategy()
        binary = fluid.CompiledProgram(main_program).with_data_parallel(
            loss_name=loss.name, build_strategy=build_strategy
        )
        iters = 2
        batch_size = 16
        train_reader = paddle.batch(
            paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500),
            batch_size=batch_size,
        )
        feeder = fluid.DataFeeder(feed_list=feeds, place=place)
        # Number of parallel devices: CUDA device count, or 2 CPU threads
        # (CPU_NUM is set to "2" at module level).
        device_num = fluid.core.get_cuda_device_count() if use_cuda else 2
        for _ in range(iters):
            data = next(train_reader())
            loss_v, prediction_v = exe.run(
                binary,
                feed=feeder.feed(data),
                fetch_list=[loss, prediction],
                return_merged=False,
            )
            self.assertEqual(np.array(loss_v).shape, (device_num, 1))
            # NOTE: batch_size / device_num is a float (true division), but
            # it still compares equal to the integer shape dimension.
            self.assertEqual(
                np.array(prediction_v).shape,
                (device_num, batch_size / device_num, 10),
            )
        for _ in range(iters):
            data = next(train_reader())
            loss_v, prediction_v = exe.run(
                binary,
                feed=feeder.feed(data),
                fetch_list=[loss, prediction],
                return_merged=True,
            )
            self.assertEqual(np.array(loss_v).shape, (device_num,))
            self.assertEqual(np.array(prediction_v).shape, (batch_size, 10))
    def test_fetch_unmerged(self):
        # Exercise both CUDA (when compiled in) and CPU execution.
        if fluid.core.is_compiled_with_cuda():
            self.fetch_unmerged(use_cuda=True)
        self.fetch_unmerged(use_cuda=False)
    def test_fetch_unmerged_parallel_graph(self):
        # Repeat the check with the parallel-graph executor enabled.
        fluid.core.globals()['FLAGS_enable_parallel_graph'] = True
        if fluid.core.is_compiled_with_cuda():
            self.fetch_unmerged(use_cuda=True)
        self.fetch_unmerged(use_cuda=False)
        fluid.core.globals()['FLAGS_enable_parallel_graph'] = False
if __name__ == '__main__':
    # Run the tests directly when invoked as a script.
    unittest.main()
|
{
"content_hash": "aa74cb8058a2c3f516f0b6a743cfa0d0",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 78,
"avg_line_length": 35.1551724137931,
"alnum_prop": 0.5505149583128984,
"repo_name": "luotao1/Paddle",
"id": "0da628db92c1123170e606f29abee330d6a8e66e",
"size": "4691",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_fetch_unmerged.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
}
|
import base64
from saml2.authn_context import INTERNETPROTOCOLPASSWORD
from saml2.saml import NAMEID_FORMAT_TRANSIENT
from saml2.samlp import NameIDPolicy
from s2repoze.plugins.sp import make_plugin
from saml2.server import Server
# Captured WSGI environ from a local CherryPy request; available as a
# template environment for SP tests.
ENV1 = {'SERVER_SOFTWARE': 'CherryPy/3.1.2 WSGI Server',
        'SCRIPT_NAME': '',
        'ACTUAL_SERVER_PROTOCOL': 'HTTP/1.1',
        'REQUEST_METHOD': 'GET',
        'PATH_INFO': '/krissms',
        'SERVER_PROTOCOL': 'HTTP/1.1',
        'QUERY_STRING': '',
        'REMOTE_ADDR': '127.0.0.1',
        'HTTP_USER_AGENT':
            'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_2; en-us) ',
        'HTTP_CONNECTION': 'keep-alive',
        'SERVER_NAME': 'lingon-catalogix-se-2.local',
        'REMOTE_PORT': '57309',
        'wsgi.url_scheme': 'http',
        'SERVER_PORT': '8087',
        'HTTP_HOST': '127.0.0.1:8087',
        'wsgi.multithread': True,
        'HTTP_ACCEPT':
            'application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
        'wsgi.version': (1, 0),
        'wsgi.run_once': False,
        'wsgi.multiprocess': False,
        'HTTP_ACCEPT_LANGUAGE': 'en-us',
        'HTTP_ACCEPT_ENCODING': 'gzip, deflate'}
# Request transient name identifiers and allow the IdP to create them.
trans_name_policy = NameIDPolicy(format=NAMEID_FORMAT_TRANSIENT,
                                 allow_create="true")
# Authentication statement details used when building test responses.
AUTHN = {
    "class_ref": INTERNETPROTOCOLPASSWORD,
    "authn_auth": "http://www.example.com/login"
}
class TestSP():
    """Tests the s2repoze SP plugin against a locally constructed IdP."""
    def setup_class(self):
        self.sp = make_plugin("rem", saml_conf="server_conf")
        self.server = Server(config_file="idp_conf")
    def test_setup(self):
        assert self.sp
    def test_identify(self):
        # Create a SAMLResponse
        ava = { "givenName": ["Derek"], "surName": ["Jeter"],
                "mail": ["derek@nyy.mlb.com"], "title":["The man"]}
        resp_str = "%s" % self.server.create_authn_response(
            ava, "id1", "http://lingon.catalogix.se:8087/",
            "urn:mace:example.com:saml:roland:sp", trans_name_policy,
            "foba0001@example.com", authn=AUTHN)
        # NOTE(review): base64.encodestring is Python 2-era; it was removed
        # in Python 3.9 (use base64.encodebytes on bytes input instead).
        resp_str = base64.encodestring(resp_str)
        # The response must match an outstanding request id for the SP to
        # accept it.
        self.sp.outstanding_queries = {"id1":"http://www.example.com/service"}
        session_info = self.sp._eval_authn_response({},
                                                    {"SAMLResponse": resp_str})
        assert len(session_info) > 1
        assert session_info["came_from"] == 'http://www.example.com/service'
        # surName comes back under the canonical 'sn' attribute name.
        assert session_info["ava"] == {'givenName': ['Derek'],
                                       'mail': ['derek@nyy.mlb.com'],
                                       'sn': ['Jeter'],
                                       'title': ['The man']}
if __name__ == "__main__":
    # Manual invocation: run the identity test directly.
    _sp = TestSP()
    _sp.setup_class()
    _sp.test_identify()
|
{
"content_hash": "a249db6e98c09c78607580ae429b9f83",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 102,
"avg_line_length": 35.78947368421053,
"alnum_prop": 0.5658088235294118,
"repo_name": "arbn/pysaml2",
"id": "76c2b551f2b13facc3301e75b42d061e78f82614",
"size": "2767",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_60_sp.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2404671"
},
{
"name": "Shell",
"bytes": "3398"
}
],
"symlink_target": ""
}
|
import os
import subprocess
import json
import contextlib
from nose.tools import istest, assert_equal, assert_raises
from whack.sources import \
PackageSourceFetcher, PackageSourceNotFound, SourceHashMismatch, \
PackageSource, create_source_tarball
from whack.tempdir import create_temporary_dir
from whack.files import read_file, write_files, plain_file
from whack.tarballs import create_tarball
from whack.errors import FileNotFoundError
from .httpserver import start_static_http_server
from .indexserver import start_index_server
@istest
def can_fetch_package_source_from_source_control():
    # A git+file:// URI pointing at a committed repository is fetchable.
    def put_package_source_into_source_control(package_source_dir):
        _convert_to_git_repo(package_source_dir)
        return "git+file://{0}".format(package_source_dir)
    _assert_package_source_can_be_written_to_target_dir(
        put_package_source_into_source_control
    )
@istest
def can_fetch_package_source_from_local_dir():
    # A plain local directory path is accepted as-is.
    _assert_package_source_can_be_written_to_target_dir(
        lambda package_source_dir: package_source_dir
    )
@istest
def can_fetch_package_source_from_local_tarball():
    # A local .tar.gz of the source directory is fetchable by path.
    with create_temporary_dir() as temp_dir:
        def create_source(package_source_dir):
            tarball_path = os.path.join(temp_dir, "package.tar.gz")
            return create_tarball(tarball_path, package_source_dir)
        _assert_package_source_can_be_written_to_target_dir(create_source)
@istest
def can_fetch_package_source_from_tarball_on_http_server():
    # The same tarball served over HTTP is fetchable by URL.
    with _temporary_static_server() as server:
        def create_source(package_source_dir):
            tarball_path = os.path.join(server.root, "package.tar.gz")
            create_tarball(tarball_path, package_source_dir)
            return server.static_url("package.tar.gz")
        _assert_package_source_can_be_written_to_target_dir(create_source)
@istest
def can_fetch_package_source_from_whack_source_uri():
    # A .whack-source tarball created by create_source_tarball is fetchable
    # via its served URL.
    with _temporary_static_server() as server:
        def create_source(package_source_dir):
            package_source = PackageSource.local(package_source_dir)
            source_tarball = create_source_tarball(package_source, server.root)
            filename = os.path.relpath(source_tarball.path, server.root)
            return server.static_url(filename)
        _assert_package_source_can_be_written_to_target_dir(create_source)
@istest
def error_is_raised_if_hash_is_not_correct():
    # The filename advertises hash a452cd, which cannot match the hash of
    # the freshly generated tarball.
    with _temporary_static_server() as server:
        with _create_temporary_package_source_dir() as package_source_dir:
            tarball_path = os.path.join(server.root, "package-a452cd.whack-source")
            create_tarball(tarball_path, package_source_dir)
            package_uri = server.static_url("package-a452cd.whack-source")
            assert_raises(
                SourceHashMismatch,
                lambda: _fetch_source(package_uri)
            )
@istest
def can_fetch_package_source_using_url_from_html_index():
    # Sources may also be resolved by name through an HTML index server.
    with start_index_server() as index_server:
        def create_source(package_source_dir):
            source_tarball = index_server.add_source(package_source_dir)
            return source_tarball.full_name
        _assert_package_source_can_be_written_to_target_dir(
            create_source,
            indices=[index_server.index_url()]
        )
def _assert_package_source_can_be_written_to_target_dir(source_filter, indices=None):
    # Creates a canonical one-file package source, transforms it with
    # source_filter into a fetchable name/URI, then checks that fetching and
    # writing it reproduces the original file contents.
    with _create_temporary_package_source_dir() as package_source_dir:
        package_source_name = source_filter(package_source_dir)
        with _fetch_source(package_source_name, indices) as package_source:
            with create_temporary_dir() as target_dir:
                package_source.write_to(target_dir)
                assert_equal(
                    "Bob",
                    read_file(os.path.join(target_dir, "whack/name"))
                )
@contextlib.contextmanager
def _create_temporary_package_source_dir():
    # A minimal package source: a single whack/name file containing "Bob".
    package_source_files = [plain_file("whack/name", "Bob")]
    with create_temporary_dir(package_source_files) as package_source_dir:
        yield package_source_dir
@istest
def writing_package_source_includes_files_specified_in_description():
    # Files listed in whack.json's sourcePaths are copied on write_to.
    with create_temporary_dir() as package_source_dir:
        whack_description = {
            "sourcePaths": ["name"]
        }
        write_files(package_source_dir, [
            plain_file("whack/whack.json", json.dumps(whack_description)),
            plain_file("name", "Bob"),
        ])
        with _fetch_source(package_source_dir) as package_source:
            with create_temporary_dir() as target_dir:
                package_source.write_to(target_dir)
                assert_equal(
                    "Bob",
                    read_file(os.path.join(target_dir, "name"))
                )
@istest
def writing_package_source_includes_directories_specified_in_description():
    # Directories listed in sourcePaths are copied recursively.
    with create_temporary_dir() as package_source_dir:
        whack_description = {
            "sourcePaths": ["names"]
        }
        write_files(package_source_dir, [
            plain_file("whack/whack.json", json.dumps(whack_description)),
            plain_file("names/bob", "Bob"),
        ])
        with _fetch_source(package_source_dir) as package_source:
            with create_temporary_dir() as target_dir:
                package_source.write_to(target_dir)
                assert_equal(
                    "Bob",
                    read_file(os.path.join(target_dir, "names/bob"))
                )
@istest
def writing_source_raises_error_if_file_is_missing():
    # A sourcePaths entry with no matching file fails the write.
    with create_temporary_dir() as package_source_dir:
        whack_description = {
            "sourcePaths": ["name"]
        }
        write_files(package_source_dir, [
            plain_file("whack/whack.json", json.dumps(whack_description)),
        ])
        with _fetch_source(package_source_dir) as package_source:
            with create_temporary_dir() as target_dir:
                assert_raises(
                    FileNotFoundError,
                    lambda: package_source.write_to(target_dir)
                )
@istest
def error_is_raised_if_package_source_cannot_be_found():
assert_raises(PackageSourceNotFound, lambda: _fetch_source("nginx/1"))
@istest
def name_is_stored_in_whack_json():
with _source_package_with_description({"name": "nginx"}) as package_source:
assert_equal("nginx", package_source.name())
@istest
def name_of_package_source_is_unknown_if_not_specified_in_whack_json():
with _source_package_with_description({}) as package_source:
assert_equal("unknown", package_source.name())
@istest
def name_of_package_source_is_unknown_if_whack_json_does_not_exist():
with create_temporary_dir() as package_source_dir:
package_source = PackageSource.local(package_source_dir)
assert_equal("unknown", package_source.name())
@istest
def description_of_package_source_contains_param_slug():
    description = {"name": "nginx", "paramSlug": "$nginx_version"}
    with _source_package_with_description(description) as package_source:
        slug = package_source.description().param_slug()
        assert_equal("$nginx_version", slug)
def _convert_to_git_repo(cwd):
    # Turn an ordinary directory into a git repository with a single commit.
    for command in (["init"], ["add", "."], ["commit", "-m", "Initial commit"]):
        subprocess.check_call(["git"] + command, cwd=cwd)
def _fetch_source(package_source_uri, indices=None):
    # Fetch through a throwaway fetcher; result is a context manager
    # yielding the package source.
    return PackageSourceFetcher(indices=indices).fetch(package_source_uri)
@contextlib.contextmanager
def _temporary_static_server():
    # Serve a fresh temporary directory over HTTP for the lifetime of the
    # with-block; both server and directory are cleaned up on exit.
    with create_temporary_dir() as server_root:
        with start_static_http_server(server_root) as server:
            yield server
@contextlib.contextmanager
def _source_package_with_description(description):
    """Yield a local PackageSource whose whack.json holds *description*."""
    with create_temporary_dir() as package_source_dir:
        files = [plain_file("whack/whack.json", json.dumps(description))]
        write_files(package_source_dir, files)
        yield PackageSource.local(package_source_dir)
|
{
"content_hash": "5d02fa0d409dc929d852ccccb32b46fa",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 85,
"avg_line_length": 35.06437768240343,
"alnum_prop": 0.6427172582619339,
"repo_name": "mwilliamson/whack",
"id": "3ad857e1ad7b40f3c231bf6edc07cf383754a6c8",
"size": "8170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sources_tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "530"
},
{
"name": "Python",
"bytes": "106682"
}
],
"symlink_target": ""
}
|
import math
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from utils import *
class batch_norm(object):
    """Callable batch-normalisation layer bound to a fixed variable scope."""

    def __init__(self, epsilon=1e-5, momentum = 0.9, name="batch_norm"):
        # Entering the scope here only reserves the name, matching the
        # behaviour of the original implementation.
        with tf.variable_scope(name):
            self.name = name
            self.momentum = momentum
            self.epsilon = epsilon

    def __call__(self, x, train=True):
        # NOTE(review): `train` is accepted but unused — statistics are
        # always updated in place (updates_collections=None).
        return tf.contrib.layers.batch_norm(x, decay=self.momentum, fused=False, updates_collections=None, epsilon=self.epsilon, scale=True, scope=self.name)
def binary_cross_entropy(preds, targets, name=None):
    """Computes binary cross entropy given `preds`.

    For brevity, let `x = preds`, `z = targets`.  The value computed below is

        loss(x, z) = - mean_i (z[i] * log(x[i]) + (1 - z[i]) * log(1 - x[i]))

    A small epsilon is added inside each log for numerical stability.

    Args:
        preds: A `Tensor` of type `float32` or `float64`.
        targets: A `Tensor` of the same type and shape as `preds`.
        name: Optional name for the op scope.
    """
    eps = 1e-12
    with ops.op_scope([preds, targets], name, "bce_loss") as name:
        preds = ops.convert_to_tensor(preds, name="preds")
        targets = ops.convert_to_tensor(targets, name="targets")
        return tf.reduce_mean(-(targets * tf.log(preds + eps) +
                              (1. - targets) * tf.log(1. - preds + eps)))
def conv_cond_concat(x, y):
    """Concatenate conditioning vector on feature map axis."""
    xs = x.get_shape()
    ys = y.get_shape()
    # Broadcast y over the spatial dimensions of x before concatenating.
    tiled = y * tf.ones([xs[0], xs[1], xs[2], ys[3]])
    return tf.concat([x, tiled], 3)
def conv2d(input_, output_dim,
           k_h=6, k_w=6, d_h=2, d_w=2, stddev=0.02,
           name="conv2d"):
    """2-D convolution with bias, SAME padding, under variable scope `name`."""
    with tf.variable_scope(name):
        in_channels = input_.get_shape()[-1]
        kernel = tf.get_variable(
            'w', [k_h, k_w, in_channels, output_dim],
            initializer=tf.truncated_normal_initializer(stddev=stddev))
        output = tf.nn.conv2d(input_, kernel, strides=[1, d_h, d_w, 1], padding='SAME')
        bias = tf.get_variable('biases', [output_dim],
                               initializer=tf.constant_initializer(0.0))
        output = tf.reshape(tf.nn.bias_add(output, bias), output.get_shape())
        return output
def deconv2d(input_, output_shape,
             k_h=6, k_w=6, d_h=2, d_w=2, stddev=0.02,
             name="deconv2d", with_w=False):
    """Transposed 2-D convolution with bias.

    Returns the output tensor, or (output, weights, biases) when `with_w`.
    """
    with tf.variable_scope(name):
        # filter : [height, width, output_channels, in_channels]
        kernel = tf.get_variable(
            'w', [k_h, k_w, output_shape[-1], input_.get_shape()[-1]],
            initializer=tf.random_normal_initializer(stddev=stddev))
        try:
            result = tf.nn.conv2d_transpose(input_, kernel, output_shape=output_shape,
                                            strides=[1, d_h, d_w, 1])
        except AttributeError:
            # Support for versions of TensorFlow before 0.7.0
            result = tf.nn.deconv2d(input_, kernel, output_shape=output_shape,
                                    strides=[1, d_h, d_w, 1])
        bias = tf.get_variable('biases', [output_shape[-1]],
                               initializer=tf.constant_initializer(0.0))
        result = tf.reshape(tf.nn.bias_add(result, bias), result.get_shape())
        if with_w:
            return result, kernel, bias
        return result
def lrelu(x, leak=0.2, name="lrelu"):
    # Leaky ReLU: identity for x >= 0, slope `leak` otherwise.  The `name`
    # argument is accepted for API compatibility but not used.
    return tf.maximum(x, x * leak)
def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):
    """Fully-connected layer: input_ @ Matrix + bias.

    Returns the activation, or (activation, Matrix, bias) when `with_w`.
    """
    in_dim = input_.get_shape().as_list()[1]
    with tf.variable_scope(scope or "Linear"):
        matrix = tf.get_variable("Matrix", [in_dim, output_size], tf.float32,
                                 tf.random_normal_initializer(stddev=stddev))
        bias = tf.get_variable("bias", [output_size],
                               initializer=tf.constant_initializer(bias_start))
        result = tf.matmul(input_, matrix) + bias
        if with_w:
            return result, matrix, bias
        return result
|
{
"content_hash": "92bf1981780328ebf5c6977e2633624d",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 157,
"avg_line_length": 40.489583333333336,
"alnum_prop": 0.5809107280679187,
"repo_name": "ultra-lstm/RNA-GAN",
"id": "73463b2b5e1e817800fc84617c999ffb48edf2f5",
"size": "3887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cGAN/ops.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41425"
}
],
"symlink_target": ""
}
|
import functools
import itertools
import logging
import random
import time
import six
from oslo.config import cfg
from oslo.messaging._drivers import amqp as rpc_amqp
from oslo.messaging._drivers import amqpdriver
from oslo.messaging._drivers import common as rpc_common
from oslo.messaging import exceptions
from oslo.messaging.openstack.common import importutils
from oslo.messaging.openstack.common import jsonutils
from oslo.messaging.openstack.common import network_utils
# FIXME(markmc): remove this
_ = lambda s: s

# qpid is an optional dependency: any of these may be None when the qpid
# python client is not installed (checked in Connection.__init__).
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")

LOG = logging.getLogger(__name__)
qpid_opts = [
cfg.StrOpt('qpid_hostname',
default='localhost',
help='Qpid broker hostname.'),
cfg.IntOpt('qpid_port',
default=5672,
help='Qpid broker port.'),
cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'],
help='Qpid HA cluster host:port pairs.'),
cfg.StrOpt('qpid_username',
default='',
help='Username for Qpid connection.'),
cfg.StrOpt('qpid_password',
default='',
help='Password for Qpid connection.',
secret=True),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for '
'auth.'),
cfg.IntOpt('qpid_heartbeat',
default=60,
help='Seconds between connection keepalive heartbeats.'),
cfg.StrOpt('qpid_protocol',
default='tcp',
help="Transport to use, either 'tcp' or 'ssl'."),
cfg.BoolOpt('qpid_tcp_nodelay',
default=True,
help='Whether to disable the Nagle algorithm.'),
# NOTE(russellb) If any additional versions are added (beyond 1 and 2),
# this file could probably use some additional refactoring so that the
# differences between each version are split into different classes.
cfg.IntOpt('qpid_topology_version',
default=1,
help="The qpid topology version to use. Version 1 is what "
"was originally used by impl_qpid. Version 2 includes "
"some backwards-incompatible changes that allow broker "
"federation to work. Users should update to version 2 "
"when they are able to take everything down, as it "
"requires a clean break."),
]
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
def raise_invalid_topology_version(conf):
    """Log and raise for an unsupported qpid_topology_version value."""
    version = conf.qpid_topology_version
    msg = _("Invalid value for qpid_topology_version: %d") % version
    LOG.error(msg)
    raise Exception(msg)
class QpidMessage(dict):
    """Dict-like incoming message that can acknowledge itself on its session."""

    def __init__(self, session, raw_message):
        content = rpc_common.deserialize_msg(raw_message.content)
        super(QpidMessage, self).__init__(content)
        self._session = session
        self._raw_message = raw_message

    def acknowledge(self):
        # Tell the broker the message has been fully processed.
        self._session.acknowledge(self._raw_message)

    def requeue(self):
        # Requeueing is not supported by this driver; silently ignore.
        pass
class ConsumerBase(object):
    """Consumer base class."""

    def __init__(self, conf, session, callback, node_name, node_opts,
                 link_name, link_opts):
        """Declare a queue on an amqp session.

        'session' is the amqp session to use
        'callback' is the callback to call when messages are received
        'node_name' is the first part of the Qpid address string, before ';'
        'node_opts' will be applied to the "x-declare" section of "node"
        in the address string.
        'link_name' goes into the "name" field of the "link" in the address
        string
        'link_opts' will be applied to the "x-declare" section of "link"
        in the address string.
        """
        self.callback = callback
        self.receiver = None
        self.session = None

        # Build the options half of the Qpid address string; the layout
        # differs between topology versions 1 and 2.
        if conf.qpid_topology_version == 1:
            addr_opts = {
                "create": "always",
                "node": {
                    "type": "topic",
                    "x-declare": {
                        "durable": True,
                        "auto-delete": True,
                    },
                },
                "link": {
                    "durable": True,
                    "x-declare": {
                        "durable": False,
                        "auto-delete": True,
                        "exclusive": False,
                    },
                },
            }
            addr_opts["node"]["x-declare"].update(node_opts)
        elif conf.qpid_topology_version == 2:
            addr_opts = {
                "link": {
                    "x-declare": {
                        "auto-delete": True,
                        "exclusive": False,
                    },
                },
            }
        else:
            raise_invalid_topology_version(conf)

        # Caller-supplied link options override/extend the defaults above.
        addr_opts["link"]["x-declare"].update(link_opts)
        if link_name:
            addr_opts["link"]["name"] = link_name

        # Qpid address string format: "<node> ; <json options>"
        self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))

        self.connect(session)

    def connect(self, session):
        """Declare the receiver on connect."""
        self._declare_receiver(session)

    def reconnect(self, session):
        """Re-declare the receiver after a Qpid reconnect."""
        self._declare_receiver(session)

    def _declare_receiver(self, session):
        # (Re)create the receiver on the given session.
        self.session = session
        self.receiver = session.receiver(self.address)
        # Prefetch at most one message at a time.
        self.receiver.capacity = 1

    def _unpack_json_msg(self, msg):
        """Load the JSON data in msg if msg.content_type indicates that it
        is necessary.  Put the loaded data back into msg.content and
        update msg.content_type appropriately.

        A Qpid Message containing a dict will have a content_type of
        'amqp/map', whereas one containing a string that needs to be converted
        back from JSON will have a content_type of JSON_CONTENT_TYPE.

        :param msg: a Qpid Message object
        :returns: None
        """
        if msg.content_type == JSON_CONTENT_TYPE:
            msg.content = jsonutils.loads(msg.content)
            msg.content_type = 'amqp/map'

    def consume(self):
        """Fetch the message and pass it to the callback object."""
        message = self.receiver.fetch()
        try:
            self._unpack_json_msg(message)
            self.callback(QpidMessage(self.session, message))
        except Exception:
            LOG.exception(_("Failed to process message... skipping it."))
            # Acknowledge even on failure so the broken message is not
            # redelivered endlessly.
            self.session.acknowledge(message)

    def get_receiver(self):
        return self.receiver

    def get_node_name(self):
        # The node part of the address is everything before the ';'.
        return self.address.split(';')[0]
class DirectConsumer(ConsumerBase):
    """Queue/consumer class for 'direct'."""

    def __init__(self, conf, session, msg_id, callback):
        """Init a 'direct' queue.

        'session' is the amqp session to use
        'msg_id' is the msg_id to listen on
        'callback' is the callback to call when messages are received
        """
        link_opts = {
            "auto-delete": conf.amqp_auto_delete,
            "exclusive": True,
            "durable": conf.amqp_durable_queues,
        }
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s/%s" % (msg_id, msg_id)
            node_opts = {"type": "direct"}
        elif version == 2:
            node_name = "amq.direct/%s" % msg_id
            node_opts = {}
        else:
            raise_invalid_topology_version(conf)
        # The link is always named after the msg_id.
        super(DirectConsumer, self).__init__(
            conf, session, callback, node_name, node_opts, msg_id, link_opts)
class TopicConsumer(ConsumerBase):
    """Consumer class for 'topic'."""

    def __init__(self, conf, session, topic, callback, exchange_name,
                 name=None):
        """Init a 'topic' queue.

        :param session: the amqp session to use
        :param topic: is the topic to listen on
        :paramtype topic: str
        :param callback: the callback to call when messages are received
        :param name: optional queue name, defaults to topic
        """
        link_opts = {
            "auto-delete": conf.amqp_auto_delete,
            "durable": conf.amqp_durable_queues,
        }
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            raise_invalid_topology_version(conf)
        link_name = name or topic
        super(TopicConsumer, self).__init__(
            conf, session, callback, node_name, {}, link_name, link_opts)
class FanoutConsumer(ConsumerBase):
    """Consumer class for 'fanout'."""

    def __init__(self, conf, session, topic, callback):
        """Init a 'fanout' queue.

        'session' is the amqp session to use
        'topic' is the topic to listen on
        'callback' is the callback to call when messages are received
        """
        self.conf = conf
        link_opts = {"exclusive": True}
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s_fanout" % topic
            node_opts = {"durable": False, "type": "fanout"}
        elif version == 2:
            node_name = "amq.topic/fanout/%s" % topic
            node_opts = {}
        else:
            raise_invalid_topology_version(conf)
        # Fanout links are unnamed (link_name=None).
        super(FanoutConsumer, self).__init__(
            conf, session, callback, node_name, node_opts, None, link_opts)
class Publisher(object):
    """Base Publisher class."""

    def __init__(self, conf, session, node_name, node_opts=None):
        """Init the Publisher class with the exchange_name, routing_key,
        and other options
        """
        self.sender = None
        self.session = session

        if conf.qpid_topology_version == 1:
            addr_opts = {
                "create": "always",
                "node": {
                    "type": "topic",
                    "x-declare": {
                        "durable": False,
                        # auto-delete isn't implemented for exchanges in qpid,
                        # but put in here anyway
                        "auto-delete": True,
                    },
                },
            }
            if node_opts:
                addr_opts["node"]["x-declare"].update(node_opts)
            # Qpid address string format: "<node> ; <json options>"
            self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
        elif conf.qpid_topology_version == 2:
            self.address = node_name
        else:
            raise_invalid_topology_version(conf)

        self.reconnect(session)

    def reconnect(self, session):
        """Re-establish the Sender after a reconnection."""
        self.sender = session.sender(self.address)

    def _pack_json_msg(self, msg):
        """Qpid cannot serialize dicts containing strings longer than 65535
        characters.  This function dumps the message content to a JSON
        string, which Qpid is able to handle.

        :param msg: May be either a Qpid Message object or a bare dict.
        :returns: A Qpid Message with its content field JSON encoded.
        """
        try:
            msg.content = jsonutils.dumps(msg.content)
        except AttributeError:
            # Need to have a Qpid message so we can set the content_type.
            msg = qpid_messaging.Message(jsonutils.dumps(msg))
        msg.content_type = JSON_CONTENT_TYPE
        return msg

    def send(self, msg):
        """Send a message."""
        try:
            # Check if Qpid can encode the message
            check_msg = msg
            if not hasattr(check_msg, 'content_type'):
                check_msg = qpid_messaging.Message(msg)
            content_type = check_msg.content_type
            enc, dec = qpid_messaging.message.get_codec(content_type)
            enc(check_msg.content)
        except qpid_codec.CodecException:
            # This means the message couldn't be serialized as a dict.
            msg = self._pack_json_msg(msg)
        self.sender.send(msg)
class DirectPublisher(Publisher):
    """Publisher class for 'direct'."""

    def __init__(self, conf, session, topic):
        """Init a 'direct' publisher."""
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s/%s" % (topic, topic)
            node_opts = {"type": "direct"}
        elif version == 2:
            node_name = "amq.direct/%s" % topic
            node_opts = {}
        else:
            raise_invalid_topology_version(conf)
        super(DirectPublisher, self).__init__(
            conf, session, node_name, node_opts)
class TopicPublisher(Publisher):
    """Publisher class for 'topic'."""

    def __init__(self, conf, session, exchange_name, topic):
        """Init a 'topic' publisher."""
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            raise_invalid_topology_version(conf)
        super(TopicPublisher, self).__init__(conf, session, node_name)
class FanoutPublisher(Publisher):
    """Publisher class for 'fanout'."""

    def __init__(self, conf, session, topic):
        """Init a 'fanout' publisher."""
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s_fanout" % topic
            node_opts = {"type": "fanout"}
        elif version == 2:
            node_name = "amq.topic/fanout/%s" % topic
            node_opts = {}
        else:
            raise_invalid_topology_version(conf)
        super(FanoutPublisher, self).__init__(
            conf, session, node_name, node_opts)
class NotifyPublisher(Publisher):
    """Publisher class for notifications."""

    def __init__(self, conf, session, exchange_name, topic):
        """Init a notification publisher (durable 'topic' node)."""
        node_opts = {"durable": True}
        version = conf.qpid_topology_version
        if version == 1:
            node_name = "%s/%s" % (exchange_name, topic)
        elif version == 2:
            node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
        else:
            raise_invalid_topology_version(conf)
        super(NotifyPublisher, self).__init__(
            conf, session, node_name, node_opts)
class Connection(object):
    """Connection object.

    Owns a single qpid connection/session pair, tracks declared consumers
    so they can be re-declared after a broker reconnect, and provides the
    publish/consume helpers used by the driver.
    """
    pools = {}

    def __init__(self, conf, url):
        if not qpid_messaging:
            raise ImportError("Failed to import qpid.messaging")
        self.connection = None
        self.session = None
        # Maps str(receiver) -> consumer, for next_receiver dispatch.
        self.consumers = {}
        self.conf = conf

        # Build the candidate broker list either from the transport URL
        # or, failing that, from the legacy qpid_* configuration options.
        self.brokers_params = []
        if url.hosts:
            for host in url.hosts:
                params = {
                    'username': host.username or '',
                    'password': host.password or '',
                }
                if host.port is not None:
                    params['host'] = '%s:%d' % (host.hostname, host.port)
                else:
                    params['host'] = host.hostname
                self.brokers_params.append(params)
        else:
            # Old configuration format
            for adr in self.conf.qpid_hosts:
                hostname, port = network_utils.parse_host_port(
                    adr, default_port=5672)
                params = {
                    'host': '%s:%d' % (hostname, port),
                    'username': self.conf.qpid_username,
                    'password': self.conf.qpid_password,
                }
                self.brokers_params.append(params)

        # Shuffle so that multiple clients spread load across brokers, then
        # cycle forever for reconnect attempts.
        random.shuffle(self.brokers_params)
        self.brokers = itertools.cycle(self.brokers_params)

        self.reconnect()

    def _connect(self, broker):
        # Create the connection - this does not open the connection
        self.connection = qpid_messaging.Connection(broker['host'])

        # Check if flags are set and if so set them for the connection
        # before we call open
        self.connection.username = broker['username']
        self.connection.password = broker['password']

        self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
        # Reconnection is done by self.reconnect()
        self.connection.reconnect = False
        self.connection.heartbeat = self.conf.qpid_heartbeat
        self.connection.transport = self.conf.qpid_protocol
        self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
        self.connection.open()

    def _register_consumer(self, consumer):
        self.consumers[six.text_type(consumer.get_receiver())] = consumer

    def _lookup_consumer(self, receiver):
        return self.consumers[six.text_type(receiver)]

    def _disconnect(self):
        # Close the session if necessary
        if self.connection is not None and self.connection.opened():
            try:
                self.connection.close()
            except qpid_exceptions.MessagingError:
                pass
        self.connection = None

    def reconnect(self, retry=None):
        """Handles reconnecting and re-establishing sessions and queues.
        Will retry up to retry number of times.
        retry = None or -1 means to retry forever
        retry = 0 means no retry
        retry = N means N retries
        """
        delay = 1
        attempt = 0
        loop_forever = False
        if retry is None or retry < 0:
            loop_forever = True

        while True:
            self._disconnect()

            attempt += 1
            broker = six.next(self.brokers)
            try:
                self._connect(broker)
            except qpid_exceptions.MessagingError as e:
                msg_dict = dict(e=e,
                                delay=delay,
                                retry=retry,
                                broker=broker)
                if not loop_forever and attempt > retry:
                    msg = _('Unable to connect to AMQP server on '
                            '%(broker)s after %(retry)d '
                            'tries: %(e)s') % msg_dict
                    LOG.error(msg)
                    raise exceptions.MessageDeliveryFailure(msg)
                else:
                    msg = _("Unable to connect to AMQP server on %(broker)s: "
                            "%(e)s. Sleeping %(delay)s seconds") % msg_dict
                    LOG.error(msg)
                    time.sleep(delay)
                    # Back off linearly up to a 5 second cap.
                    delay = min(delay + 1, 5)
            else:
                LOG.info(_('Connected to AMQP server on %s'), broker['host'])
                break

        self.session = self.connection.session()

        if self.consumers:
            # Re-declare every consumer on the fresh session, rebuilding
            # the receiver->consumer map as receivers change identity.
            consumers = self.consumers
            self.consumers = {}

            for consumer in six.itervalues(consumers):
                consumer.reconnect(self.session)
                self._register_consumer(consumer)

            LOG.debug("Re-established AMQP queues")

    def ensure(self, error_callback, method, retry=None):
        """Run method(); on qpid errors invoke error_callback and reconnect."""
        while True:
            try:
                return method()
            except (qpid_exceptions.Empty,
                    qpid_exceptions.MessagingError) as e:
                if error_callback:
                    error_callback(e)
                self.reconnect(retry=retry)

    def close(self):
        """Close/release this connection."""
        try:
            self.connection.close()
        except Exception:
            # NOTE(dripton) Logging exceptions that happen during cleanup just
            # causes confusion; there's really nothing useful we can do with
            # them.
            pass
        self.connection = None

    def reset(self):
        """Reset a connection so it can be used again."""
        self.session.close()
        self.session = self.connection.session()
        self.consumers = {}

    def declare_consumer(self, consumer_cls, topic, callback):
        """Create a Consumer using the class that was passed in and
        add it to our list of consumers
        """
        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': exc}
            LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
                        "%(err_str)s"), log_info)

        def _declare_consumer():
            consumer = consumer_cls(self.conf, self.session, topic, callback)
            self._register_consumer(consumer)
            return consumer

        return self.ensure(_connect_error, _declare_consumer)

    def iterconsume(self, limit=None, timeout=None):
        """Return an iterator that will consume from all queues/consumers."""

        def _error_callback(exc):
            if isinstance(exc, qpid_exceptions.Empty):
                LOG.debug('Timed out waiting for RPC response: %s', exc)
                raise rpc_common.Timeout()
            else:
                LOG.exception(_('Failed to consume message from queue: %s'),
                              exc)

        def _consume():
            nxt_receiver = self.session.next_receiver(timeout=timeout)
            try:
                self._lookup_consumer(nxt_receiver).consume()
            except Exception:
                LOG.exception(_("Error processing message. Skipping it."))

        for iteration in itertools.count(0):
            if limit and iteration >= limit:
                # NOTE: 'raise StopIteration' inside a generator is a
                # RuntimeError under PEP 479 (Python 3.7+); 'return' ends
                # the generator with identical semantics.
                return
            yield self.ensure(_error_callback, _consume)

    def publisher_send(self, cls, topic, msg, retry=None, **kwargs):
        """Send to a publisher based on the publisher class."""

        def _connect_error(exc):
            log_info = {'topic': topic, 'err_str': exc}
            LOG.exception(_("Failed to publish message to topic "
                            "'%(topic)s': %(err_str)s"), log_info)

        def _publisher_send():
            publisher = cls(self.conf, self.session, topic=topic, **kwargs)
            publisher.send(msg)

        return self.ensure(_connect_error, _publisher_send, retry=retry)

    def declare_direct_consumer(self, topic, callback):
        """Create a 'direct' queue.
        In nova's use, this is generally a msg_id queue used for
        responses for call/multicall
        """
        self.declare_consumer(DirectConsumer, topic, callback)

    def declare_topic_consumer(self, exchange_name, topic, callback=None,
                               queue_name=None):
        """Create a 'topic' consumer."""
        self.declare_consumer(functools.partial(TopicConsumer,
                                                name=queue_name,
                                                exchange_name=exchange_name,
                                                ),
                              topic, callback)

    def declare_fanout_consumer(self, topic, callback):
        """Create a 'fanout' consumer."""
        self.declare_consumer(FanoutConsumer, topic, callback)

    def direct_send(self, msg_id, msg):
        """Send a 'direct' message."""
        self.publisher_send(DirectPublisher, topic=msg_id, msg=msg)

    def topic_send(self, exchange_name, topic, msg, timeout=None, retry=None):
        """Send a 'topic' message."""
        #
        # We want to create a message with attributes, for example a TTL. We
        # don't really need to keep 'msg' in its JSON format any longer
        # so let's create an actual Qpid message here and get some
        # value-add on the go.
        #
        # WARNING: Request timeout happens to be in the same units as
        # Qpid's TTL (seconds). If this changes in the future, then this
        # will need to be altered accordingly.
        #
        qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
        self.publisher_send(TopicPublisher, topic=topic, msg=qpid_message,
                            exchange_name=exchange_name, retry=retry)

    def fanout_send(self, topic, msg, retry=None):
        """Send a 'fanout' message."""
        self.publisher_send(FanoutPublisher, topic=topic, msg=msg, retry=retry)

    def notify_send(self, exchange_name, topic, msg, retry=None, **kwargs):
        """Send a notify message on a topic."""
        self.publisher_send(NotifyPublisher, topic=topic, msg=msg,
                            exchange_name=exchange_name, retry=retry)

    def consume(self, limit=None, timeout=None):
        """Consume from all queues/consumers."""
        it = self.iterconsume(limit=limit, timeout=timeout)
        while True:
            try:
                six.next(it)
            except StopIteration:
                return
class QpidDriver(amqpdriver.AMQPDriverBase):
    """oslo.messaging driver that speaks AMQP via the qpid client library."""

    def __init__(self, conf, url,
                 default_exchange=None, allowed_remote_exmods=None):
        # Register both the qpid-specific and the generic amqp options
        # before any Connection reads them.
        for opts in (qpid_opts, rpc_amqp.amqp_opts):
            conf.register_opts(opts)

        pool = rpc_amqp.get_connection_pool(conf, url, Connection)
        super(QpidDriver, self).__init__(conf, url,
                                         pool,
                                         default_exchange,
                                         allowed_remote_exmods)
|
{
"content_hash": "9839b766c35c95f0034c4fc9410fe8f3",
"timestamp": "",
"source": "github",
"line_count": 712,
"max_line_length": 79,
"avg_line_length": 36.30477528089887,
"alnum_prop": 0.5481063097218462,
"repo_name": "viggates/oslo.messaging",
"id": "f79fae6a722542a409c5621068c330ae9aa72ece",
"size": "26507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslo/messaging/_drivers/impl_qpid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "517172"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
import binascii
import struct
import base64
import time
import random
import hmac
import hashlib
import string
from shadowsocks import common
from shadowsocks.obfsplugin import plain
from shadowsocks.common import to_bytes, to_str, ord
from shadowsocks import lru_cache
def create_tls_ticket_auth_obfs(method):
    # Factory used by obfs_map below; `method` is the obfs method name.
    return tls_ticket_auth(method)
# Supported obfuscation method names mapped to their factory tuples.
# All variants share the same tls_ticket_auth implementation; behaviour
# differences (e.g. the *_compatible fallback in decode_error_return) are
# keyed off the method name inside the class.
obfs_map = {
    'tls1.2_ticket_auth': (create_tls_ticket_auth_obfs,),
    'tls1.2_ticket_auth_compatible': (create_tls_ticket_auth_obfs,),
    'tls1.2_ticket_fastauth': (create_tls_ticket_auth_obfs,),
    'tls1.2_ticket_fastauth_compatible': (create_tls_ticket_auth_obfs,),
}
def match_begin(str1, str2):
    """Return True if str1 starts with str2 (works for str and bytes)."""
    if len(str1) < len(str2):
        return False
    return str1[:len(str2)] == str2
class obfs_auth_data(object):
    # Server-side shared state for tls1.2_ticket_auth connections.
    def __init__(self):
        # Cache of recently seen client auth data; 60 * 5 is presumably a
        # 5-minute expiry — confirm against lru_cache.LRUCache.
        self.client_data = lru_cache.LRUCache(60 * 5)
        # Random 32-byte identifier generated once per process.
        self.client_id = os.urandom(32)
        # Startup time shifted back 30 minutes, truncated to 32 bits.
        self.startup_time = int(time.time() - 60 * 30) & 0xFFFFFFFF
class tls_ticket_auth(plain.plain):
    def __init__(self, method):
        self.method = method
        # Handshake state: -1 = plain fallback; on the client side 0/1/8 mark
        # not-started / hello-sent / established, on the server side the
        # value is treated as a bit mask (see server_encode/server_decode).
        self.handshake_status = 0
        self.send_buffer = b''  # payload queued until the handshake finishes
        self.recv_buffer = b''  # partial TLS records awaiting more data
        self.client_id = b''
        self.max_time_dif = 60 * 60 * 24  # time dif (second) setting
        self.tls_version = b'\x03\x03'  # TLS 1.2 record version bytes
        self.overhead = 5  # per-record framing overhead (5-byte header)
    def init_data(self):
        # Return a fresh obfs_auth_data instance (server-side shared state).
        return obfs_auth_data()
    def get_overhead(self, direction):  # direction: true for c->s false for s->c
        # 5 bytes of record framing per packet; reset to 0 when falling back
        # to plain mode (see decode_error_return).
        return self.overhead
def sni(self, url):
url = common.to_bytes(url)
data = b"\x00" + struct.pack('>H', len(url)) + url
data = b"\x00\x00" + struct.pack('>H', len(data) + 2) + struct.pack('>H', len(data)) + data
return data
    def pack_auth_data(self, client_id):
        """Build the 32-byte auth blob: 4-byte UTC timestamp + 18 random
        bytes + 10-byte truncated HMAC-SHA1 keyed with key + client_id."""
        utc_time = int(time.time()) & 0xFFFFFFFF
        data = struct.pack('>I', utc_time) + os.urandom(18)
        data += hmac.new(self.server_info.key + client_id, data, hashlib.sha1).digest()[:10]
        return data
    def client_encode(self, buf):
        """Wrap outgoing client data in fake TLS records.

        Drives the client-side handshake: first call emits a forged
        ClientHello; the next zero-length call emits ChangeCipherSpec +
        Finished plus any buffered payload; afterwards all payload is
        framed as TLS application-data records.
        """
        if self.handshake_status == -1:
            # Plain fallback: pass data through untouched.
            return buf
        if self.handshake_status == 8:
            # Established: split into application-data records of random size.
            ret = b''
            while len(buf) > 2048:
                size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf))
                ret += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size]
                buf = buf[size:]
            if len(buf) > 0:
                ret += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
            return ret
        if len(buf) > 0:
            # Handshake not finished yet: queue the framed payload for later.
            self.send_buffer += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
        if self.handshake_status == 0:
            self.handshake_status = 1
            # Forged ClientHello: random field carries the auth blob, the
            # session id carries our client_id, followed by a fixed
            # cipher-suite list.
            data = self.tls_version + self.pack_auth_data(self.server_info.data.client_id) + b"\x20" + self.server_info.data.client_id + binascii.unhexlify(b"001cc02bc02fcca9cca8cc14cc13c00ac014c009c013009c0035002f000a" + b"0100")
            ext = binascii.unhexlify(b"ff01000100")
            host = self.server_info.obfs_param or self.server_info.host
            if host and host[-1] in string.digits:
                # Host ends in a digit (presumably an IP literal): use an
                # empty SNI name instead.
                host = ''
            hosts = host.split(',')
            host = random.choice(hosts)
            ext += self.sni(host)
            ext += b"\x00\x17\x00\x00"
            ext += b"\x00\x23\x00\xd0" + os.urandom(208)  # ticket
            ext += binascii.unhexlify(b"000d001600140601060305010503040104030301030302010203")
            ext += binascii.unhexlify(b"000500050100000000")
            ext += binascii.unhexlify(b"00120000")
            ext += binascii.unhexlify(b"75500000")
            ext += binascii.unhexlify(b"000b00020100")
            ext += binascii.unhexlify(b"000a0006000400170018")
            data += struct.pack('>H', len(ext)) + ext
            data = b"\x01\x00" + struct.pack('>H', len(data)) + data
            data = b"\x16\x03\x01" + struct.pack('>H', len(data)) + data
            return data
        elif self.handshake_status == 1 and len(buf) == 0:
            data = b"\x14" + self.tls_version + b"\x00\x01\x01"  # ChangeCipherSpec
            data += b"\x16" + self.tls_version + b"\x00\x20" + os.urandom(22)  # Finished
            # Truncated HMAC over the two records authenticates this client.
            data += hmac.new(self.server_info.key + self.server_info.data.client_id, data, hashlib.sha1).digest()[:10]
            ret = data + self.send_buffer
            self.send_buffer = b''
            self.handshake_status = 8
            return ret
        return b''
    def client_decode(self, buf):
        """Unwrap data received by the client.

        :returns: tuple (decoded_bytes, need_send_back)
        """
        if self.handshake_status == -1:
            # Plain fallback: pass data through untouched.
            return (buf, False)
        if self.handshake_status == 8:
            # Established: strip the 5-byte application-data record headers.
            ret = b''
            self.recv_buffer += buf
            while len(self.recv_buffer) > 5:
                if ord(self.recv_buffer[0]) != 0x17:
                    logging.info("data = %s" % (binascii.hexlify(self.recv_buffer)))
                    raise Exception('server_decode appdata error')
                size = struct.unpack('>H', self.recv_buffer[3:5])[0]
                if len(self.recv_buffer) < size + 5:
                    # Incomplete record; wait for more data.
                    break
                buf = self.recv_buffer[5:size+5]
                ret += buf
                self.recv_buffer = self.recv_buffer[size+5:]
            return (ret, False)
        # Still handshaking: verify the server's truncated HMACs, then ask
        # the caller to send back the final handshake flight.
        if len(buf) < 11 + 32 + 1 + 32:
            raise Exception('client_decode data error')
        verify = buf[11:33]
        if hmac.new(self.server_info.key + self.server_info.data.client_id, verify, hashlib.sha1).digest()[:10] != buf[33:43]:
            raise Exception('client_decode data error')
        if hmac.new(self.server_info.key + self.server_info.data.client_id, buf[:-10], hashlib.sha1).digest()[:10] != buf[-10:]:
            raise Exception('client_decode data error')
        return (b'', True)
    def server_encode(self, buf):
        """Wrap outgoing server data.

        The first call emits the forged ServerHello / optional
        NewSessionTicket / ChangeCipherSpec / Finished flight; afterwards
        payload is framed as TLS application-data records.
        """
        if self.handshake_status == -1:
            # Plain fallback: pass data through untouched.
            return buf
        if (self.handshake_status & 8) == 8:
            # Established: split into application-data records of random size.
            ret = b''
            while len(buf) > 2048:
                size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf))
                ret += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size]
                buf = buf[size:]
            if len(buf) > 0:
                ret += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
            return ret
        self.handshake_status |= 8
        # ServerHello: random carries the auth blob, session id echoes the
        # client_id, followed by a fixed cipher-suite/extension tail.
        data = self.tls_version + self.pack_auth_data(self.client_id) + b"\x20" + self.client_id + binascii.unhexlify(b"c02f000005ff01000100")
        data = b"\x02\x00" + struct.pack('>H', len(data)) + data  # server hello
        data = b"\x16" + self.tls_version + struct.pack('>H', len(data)) + data
        if random.randint(0, 8) < 1:
            # Occasionally (1 in 9) include a NewSessionTicket of random size.
            ticket = os.urandom((struct.unpack('>H', os.urandom(2))[0] % 164) * 2 + 64)
            ticket = struct.pack('>H', len(ticket) + 4) + b"\x04\x00" + struct.pack('>H', len(ticket)) + ticket
            data += b"\x16" + self.tls_version + ticket  # New session ticket
        data += b"\x14" + self.tls_version + b"\x00\x01\x01"  # ChangeCipherSpec
        finish_len = random.choice([32, 40])
        data += b"\x16" + self.tls_version + struct.pack('>H', finish_len) + os.urandom(finish_len - 10)  # Finished
        # Truncated HMAC over the whole flight authenticates this server.
        data += hmac.new(self.server_info.key + self.client_id, data, hashlib.sha1).digest()[:10]
        if buf:
            data += self.server_encode(buf)
        return data
def decode_error_return(self, buf):
self.handshake_status = -1
self.overhead = 0
if self.method == 'tls1.2_ticket_auth' or self.method == 'tls1.2_ticket_fastauth':
return (b'E'*2048, False, False)
return (buf, True, False)
    def server_decode(self, buf):
        """Parse incoming client data framed as fake TLS.

        Returns a tuple ``(plain_data, is_need_decrypt,
        is_need_to_encode_and_send_back[, host_name])`` and drives the
        handshake state machine stored in ``self.handshake_status``.

        NOTE(review): this code relies on Python 2 byte-string semantics
        (``ord(buf[0])`` etc.) -- it is not Python 3 compatible as written.
        """
        if self.handshake_status == -1:
            # Obfuscation disabled: pass through untouched.
            return (buf, True, False)
        if (self.handshake_status & 4) == 4:
            # Steady state: strip TLS application-data (0x17 0x03 0x03)
            # record headers from the accumulated stream.
            ret = b''
            self.recv_buffer += buf
            while len(self.recv_buffer) > 5:
                if ord(self.recv_buffer[0]) != 0x17 or ord(self.recv_buffer[1]) != 0x3 or ord(self.recv_buffer[2]) != 0x3:
                    logging.info("data = %s" % (binascii.hexlify(self.recv_buffer)))
                    raise Exception('server_decode appdata error')
                size = struct.unpack('>H', self.recv_buffer[3:5])[0]
                if len(self.recv_buffer) < size + 5:
                    # Incomplete record; wait for more data.
                    break
                ret += self.recv_buffer[5:size+5]
                self.recv_buffer = self.recv_buffer[size+5:]
            return (ret, True, False)
        if (self.handshake_status & 1) == 1:
            # ClientHello already accepted: expect the client's
            # ChangeCipherSpec + Finished records, authenticated by HMAC.
            self.recv_buffer += buf
            buf = self.recv_buffer
            verify = buf
            if len(buf) < 11:
                raise Exception('server_decode data error')
            if not match_begin(buf, b"\x14" + self.tls_version + b"\x00\x01\x01"): #ChangeCipherSpec
                raise Exception('server_decode data error')
            buf = buf[6:]
            if not match_begin(buf, b"\x16" + self.tls_version + b"\x00"): #Finished
                raise Exception('server_decode data error')
            verify_len = struct.unpack('>H', buf[3:5])[0] + 1 # 11 - 10
            if len(verify) < verify_len + 10:
                # HMAC tail not fully received yet.
                return (b'', False, False)
            if hmac.new(self.server_info.key + self.client_id, verify[:verify_len], hashlib.sha1).digest()[:10] != verify[verify_len:verify_len+10]:
                raise Exception('server_decode data error')
            self.recv_buffer = verify[verify_len + 10:]
            status = self.handshake_status  # NOTE(review): unused local
            self.handshake_status |= 4
            # Re-enter in steady state to consume any trailing appdata.
            ret = self.server_decode(b'')
            return ret;
            #raise Exception("handshake data = %s" % (binascii.hexlify(buf)))
        # Initial state: parse the (fake) ClientHello record.
        self.recv_buffer += buf
        buf = self.recv_buffer
        ogn_buf = buf
        if len(buf) < 3:
            return (b'', False, False)
        if not match_begin(buf, b'\x16\x03\x01'):
            return self.decode_error_return(ogn_buf)
        buf = buf[3:]
        header_len = struct.unpack('>H', buf[:2])[0]
        if header_len > len(buf) - 2:
            # Record not fully received yet.
            return (b'', False, False)
        self.recv_buffer = self.recv_buffer[header_len + 5:]
        self.handshake_status = 1
        buf = buf[2:header_len + 2]
        if not match_begin(buf, b'\x01\x00'): #client hello
            logging.info("tls_auth not client hello message")
            return self.decode_error_return(ogn_buf)
        buf = buf[2:]
        if struct.unpack('>H', buf[:2])[0] != len(buf) - 2:
            logging.info("tls_auth wrong message size")
            return self.decode_error_return(ogn_buf)
        buf = buf[2:]
        if not match_begin(buf, self.tls_version):
            logging.info("tls_auth wrong tls version")
            return self.decode_error_return(ogn_buf)
        buf = buf[2:]
        # 32-byte "random": 4-byte UTC time + 18 bytes data + 10-byte HMAC.
        verifyid = buf[:32]
        buf = buf[32:]
        sessionid_len = ord(buf[0])
        if sessionid_len < 32:
            logging.info("tls_auth wrong sessionid_len")
            return self.decode_error_return(ogn_buf)
        sessionid = buf[1:sessionid_len + 1]
        buf = buf[sessionid_len+1:]
        self.client_id = sessionid
        sha1 = hmac.new(self.server_info.key + sessionid, verifyid[:22], hashlib.sha1).digest()[:10]
        utc_time = struct.unpack('>I', verifyid[:4])[0]
        time_dif = common.int32((int(time.time()) & 0xffffffff) - utc_time)
        if self.server_info.obfs_param:
            # obfs_param, when numeric, overrides the allowed clock skew.
            try:
                self.max_time_dif = int(self.server_info.obfs_param)
            except:
                pass
        # Reject handshakes with too much clock skew (anti-replay window).
        if self.max_time_dif > 0 and (time_dif < -self.max_time_dif or time_dif > self.max_time_dif \
                or common.int32(utc_time - self.server_info.data.startup_time) < -self.max_time_dif / 2):
            logging.info("tls_auth wrong time")
            return self.decode_error_return(ogn_buf)
        if sha1 != verifyid[22:]:
            logging.info("tls_auth wrong sha1")
            return self.decode_error_return(ogn_buf)
        if self.server_info.data.client_data.get(verifyid[:22]):
            # Same client nonce seen before within the cache window.
            logging.info("replay attack detect, id = %s" % (binascii.hexlify(verifyid)))
            return self.decode_error_return(ogn_buf)
        self.server_info.data.client_data.sweep()
        self.server_info.data.client_data[verifyid[:22]] = sessionid
        if len(self.recv_buffer) >= 11:
            # More handshake bytes already buffered; process them now.
            ret = self.server_decode(b'')
            return (ret[0], True, True)
        # (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back)
        # Scan the extension area for the SNI terminator to pull the
        # requested host name out of the forged ClientHello.
        buf = buf[48:]
        host_name = ''
        for index in range(len(buf)):
            if index + 4 < len(buf):
                if buf[index:index + 4] == b"\x00\x17\x00\x00":
                    if buf[:index] != '':
                        host_name = buf[:index]
                        host_name = host_name.decode('utf-8')
        return (b'', False, True, host_name)
|
{
"content_hash": "b0cbcac5725c37a2aae4047097ea4d97",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 230,
"avg_line_length": 44.44897959183673,
"alnum_prop": 0.5550964187327824,
"repo_name": "yalewoosoft/shadowsocks",
"id": "1ecf66df878fd189da990e7c571cc889658e56f7",
"size": "13671",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "shadowsocks/obfsplugin/obfs_tls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "1737"
},
{
"name": "Python",
"bytes": "761805"
},
{
"name": "Shell",
"bytes": "14950"
}
],
"symlink_target": ""
}
|
import json
import os
import tempfile
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
from django.test import (
TestCase as BaseTestCase,
Client as BaseClient
)
import factory
from django_nose.tools import assert_equal
from factory import LazyAttribute, Sequence, SubFactory, SelfAttribute
from factory.django import DjangoModelFactory
from mock import patch
from pontoon.base.models import (
ChangedEntityLocale,
Entity,
Locale,
Project,
ProjectLocale,
Repository,
Resource,
TranslatedResource,
Subpage,
Translation,
TranslationMemoryEntry
)
class PontoonClient(BaseClient):
    """Django test client extended with Pontoon-specific helpers."""
    def ajax_post(self, url, params):
        """POST `params` to `url` with the header Django checks for AJAX."""
        headers = {'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}
        return self.post(url, params, **headers)
class TestCase(BaseTestCase):
    """Base test case using the Pontoon client, with self-cleaning
    wrappers around ``mock.patch``."""
    client_class = PontoonClient
    def _start_self_cleaning(self, patcher):
        # Register the undo first, then activate; the patch is reverted
        # automatically during the test cleanup phase.
        self.addCleanup(patcher.stop)
        return patcher.start()
    def patch(self, *args, **kwargs):
        """
        Wrapper around mock.patch that automatically cleans up the patch
        in the test cleanup phase.
        """
        return self._start_self_cleaning(patch(*args, **kwargs))
    def patch_object(self, *args, **kwargs):
        """
        Wrapper around mock.patch.object that automatically cleans up
        the patch in the test cleanup phase.
        """
        return self._start_self_cleaning(patch.object(*args, **kwargs))
class UserFactory(DjangoModelFactory):
    # Creates User rows with unique usernames/emails (test0, test1, ...).
    username = Sequence(lambda n: 'test%s' % n)
    email = Sequence(lambda n: 'test%s@example.com' % n)
    class Meta:
        model = User
class ProjectFactory(DjangoModelFactory):
    # Builds Project rows; the slug is derived from the generated name.
    name = Sequence(lambda n: 'Project {0}'.format(n))
    slug = LazyAttribute(lambda p: slugify(p.name))
    links = False
    class Meta:
        model = Project
    @factory.post_generation
    def locales(self, create, extracted, **kwargs):
        # Attach a ProjectLocale for each locale passed via `locales=[...]`.
        if not create:
            return
        if extracted:
            for locale in extracted:
                ProjectLocaleFactory.create(project=self, locale=locale)
    @factory.post_generation
    def repositories(self, create, extracted, **kwargs):
        # Attach the given repositories; when none are passed explicitly,
        # fall back to a single default git repo.
        if not create:
            return
        if extracted is not None:
            for repository in extracted:
                self.repositories.add(repository)
        else:  # Default to a single valid repo.
            self.repositories.add(RepositoryFactory.build(), bulk=False)
class ProjectLocaleFactory(DjangoModelFactory):
    # Bare ProjectLocale factory; caller supplies project/locale.
    class Meta:
        model = ProjectLocale
class RepositoryFactory(DjangoModelFactory):
    # Git repository with a unique fake URL, attached to a new Project.
    project = SubFactory(ProjectFactory)
    type = 'git'
    url = Sequence(lambda n: 'https://example.com/url_{0}.git'.format(n))
    class Meta:
        model = Repository
class ResourceFactory(DjangoModelFactory):
    # Single-string gettext resource attached to a new Project.
    project = SubFactory(ProjectFactory)
    path = '/fake/path.po'
    format = 'po'
    total_strings = 1
    class Meta:
        model = Resource
class LocaleFactory(DjangoModelFactory):
    # Locale rows with unique synthetic codes (en-0, en-1, ...).
    code = Sequence(lambda n: 'en-{0}'.format(n))
    name = Sequence(lambda n: 'English #{0}'.format(n))
    class Meta:
        model = Locale
class EntityFactory(DjangoModelFactory):
    # Entity with a unique source string on a fresh Resource.
    resource = SubFactory(ResourceFactory)
    string = Sequence(lambda n: 'string {0}'.format(n))
    class Meta:
        model = Entity
class PluralEntityFactory(DjangoModelFactory):
    # Entity carrying both singular and plural source strings.
    resource = SubFactory(ResourceFactory)
    string = Sequence(lambda n: 'string {0}'.format(n))
    string_plural = Sequence(lambda n: 'string plural {0}'.format(n))
    class Meta:
        model = Entity
class ChangedEntityLocaleFactory(DjangoModelFactory):
    # Marks an (entity, locale) pair as changed since the last sync.
    entity = SubFactory(EntityFactory)
    locale = SubFactory(LocaleFactory)
    class Meta:
        model = ChangedEntityLocale
class TranslationFactory(DjangoModelFactory):
    # Translation with a unique string, authored by a fresh user.
    entity = SubFactory(EntityFactory)
    locale = SubFactory(LocaleFactory)
    string = Sequence(lambda n: 'translation {0}'.format(n))
    user = SubFactory(UserFactory)
    class Meta:
        model = Translation
class IdenticalTranslationFactory(TranslationFactory):
    # Translation whose entity's source string equals the translation itself.
    entity = SubFactory(EntityFactory, string=SelfAttribute('..string'))
class TranslationMemoryFactory(DjangoModelFactory):
    # TM entry; the backing entity reuses this entry's source string.
    source = Sequence(lambda n: 'source {0}'.format(n))
    target = Sequence(lambda n: 'target {0}'.format(n))
    entity = SubFactory(EntityFactory, string=SelfAttribute('..source'))
    locale = SubFactory(LocaleFactory)
    class Meta:
        model = TranslationMemoryEntry
class TranslatedResourceFactory(DjangoModelFactory):
    # Links a Resource to a Locale it is translated into.
    resource = SubFactory(ResourceFactory)
    locale = SubFactory(LocaleFactory)
    class Meta:
        model = TranslatedResource
class SubpageFactory(DjangoModelFactory):
    # Subpage on a fresh project; resources may be passed post-generation.
    project = SubFactory(ProjectFactory)
    name = Sequence(lambda n: 'subpage {0}'.format(n))
    class Meta:
        model = Subpage
    @factory.post_generation
    def resources(self, create, extracted, **kwargs):
        # Attach each resource passed via `resources=[...]`.
        if not create:
            return
        if extracted:
            for resource in extracted:
                self.resources.add(resource)
def assert_redirects(response, expected_url, status_code=302, host=None, secure=False):
    """
    Assert that the given response redirects to the expected URL.

    The main difference between this and TestCase.assertRedirects is
    that this version doesn't follow the redirect.

    :param response: response object with `status_code` and a
        `'Location'` header.
    :param expected_url: path the redirect should point at.
    :param status_code: expected redirect status (default 302).
    :param host: scheme+host prefix; defaults to the Django test host.
    :param secure: use https for the default host prefix.
    """
    if host is None:
        # Fix: the original wrote `host or 'testserver'` here, but `host`
        # is always None inside this branch, so the fallback is constant.
        host = '{}://{}'.format('https' if secure else 'http', 'testserver')
    assert_equal(response.status_code, status_code)
    assert_equal(response['Location'], host + expected_url)
def assert_attributes_equal(original, **expected_attrs):
    """
    Assert that `original` carries every attribute named by the keyword
    arguments, each with exactly the value given for it.
    """
    if not expected_attrs:
        raise ValueError('Expected some attributes to check.')
    for attr_name, expected in expected_attrs.items():
        actual = getattr(original, attr_name)
        failure_msg = (
            'Attribute `{key}` does not match: {original_value} != {value}'
            .format(key=attr_name, original_value=actual, value=expected))
        assert_equal(actual, expected, failure_msg)
class NOT(object):
"""
A helper class that compares equal to everything except its given
values.
>>> mock_function('foobarbaz')
>>> mock_function.assert_called_with(NOT('fizzbarboff')) # Passes
>>> mock_function.assert_called_with(NOT('foobarbaz')) # Fails
"""
def __init__(self, *values):
self.values = values
def __eq__(self, other):
return other not in self.values
def __ne__(self, other):
return other in self.values
def __repr__(self):
return '<NOT %r>' % self.values
class CONTAINS(object):
    """
    Comparison helper that is equal to any object containing all of the
    elements it was constructed with.
    Mostly used with Mock.assert_called_with to check that a string
    argument contains certain substrings:
    >>> mock_function('foobarbaz')
    >>> mock_function.assert_called_with(CONTAINS('bar'))  # Passes
    """
    def __init__(self, *args):
        self.items = args
    def __eq__(self, other):
        for item in self.items:
            if item not in other:
                return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        return '<CONTAINS {0}>'.format(','.join(repr(item) for item in self.items))
def create_tempfile(contents):
    """
    Write `contents` to a fresh temporary file and return its path.
    The caller is responsible for removing the file.
    """
    descriptor, file_path = tempfile.mkstemp()
    with os.fdopen(descriptor, 'w') as handle:
        handle.write(contents)
    return file_path
def assert_json(response, expected_obj):
    """
    Assert that the response body parses as JSON equal to `expected_obj`.
    """
    assert_equal(json.loads(response.content), expected_obj)
|
{
"content_hash": "e46b7bd3bb7e9680084706282f260ebc",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 87,
"avg_line_length": 26.936241610738254,
"alnum_prop": 0.6589012084215772,
"repo_name": "mastizada/pontoon",
"id": "c66933104892f55e10fab25d6a15505fe14bd737",
"size": "8027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pontoon/base/tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "116831"
},
{
"name": "HTML",
"bytes": "131060"
},
{
"name": "JavaScript",
"bytes": "472460"
},
{
"name": "Makefile",
"bytes": "1085"
},
{
"name": "Python",
"bytes": "841704"
},
{
"name": "Shell",
"bytes": "4616"
}
],
"symlink_target": ""
}
|
from sys import maxsize
class Group:
    """Value object describing an address-book group.

    Two groups compare equal when their names match and their ids are
    either equal or at least one of them is unset.
    """
    def __init__(self, group_name=None, group_header=None, group_footer=None, id=None):
        self.group_name = group_name
        self.group_header = group_header
        self.group_footer = group_footer
        self.id = id
    def __repr__(self):
        return '%s:%s' % (self.id, self.group_name)
    def __eq__(self, other):
        ids_compatible = self.id is None or other.id is None or self.id == other.id
        return ids_compatible and self.group_name == other.group_name
    def if_or_max(self):
        # Sort key: numeric id when present, otherwise push to the end.
        return int(self.id) if self.id else maxsize
class GroupBase:
    """Page-object helper wrapping Selenium interactions with the
    address-book "groups" page."""
    def __init__(self, app):
        # `app` is the fixture that owns the webdriver (`app.wd`).
        self.app = app
    def open_group_page(self):
        """Navigate to the group page unless it is already open."""
        wd = self.app.wd
        if not (wd.current_url.endswith('/group.php') and len(wd.find_elements_by_name('new')) > 0):
            wd.find_element_by_link_text("groups").click()
    def count(self):
        """Return the number of group rows listed on the page."""
        wd = self.app.wd
        self.open_group_page()
        return len(wd.find_elements_by_name("selected[]"))
    def validation_of_group_exist(self):
        """Ensure at least one group exists, creating one if needed."""
        if self.count() == 0:
            self.create(Group(group_name='test'))
            self.click_group_page()
    def group_line(self, field, text):
        # Fill form field `field` with `text`; falsy text leaves it untouched.
        wd = self.app.wd
        if text:
            wd.find_element_by_name(field).click()
            wd.find_element_by_name(field).clear()
            wd.find_element_by_name(field).send_keys(text)
    def create(self, Group):
        """Create a new group from the given Group value object.

        NOTE(review): the parameter shadows the module-level Group class.
        """
        wd = self.app.wd
        self.open_group_page()
        wd.find_element_by_name("new").click()
        self.group_line('group_name', Group.group_name)
        self.group_line('group_header', Group.group_header)
        self.group_line('group_footer', Group.group_footer)
        wd.find_element_by_name("submit").click()
        self.group_cache = None
    def delete_first_group(self):
        """Delete the topmost group row."""
        self.delete_group_by_index(0)
    def click_group_page(self):
        """Dismiss the confirmation box and return to the group page."""
        wd = self.app.wd
        wd.find_element_by_css_selector("div.msgbox").click()
        wd.find_element_by_link_text("group page").click()
    # Cached list of Group objects; invalidated by mutating operations.
    group_cache = None
    def get_group_list(self):
        """Return the (possibly cached) list of groups shown on the page."""
        if self.group_cache is None:
            wd = self.app.wd
            self.open_group_page()
            self.group_cache = []
            for element in wd.find_elements_by_css_selector('span.group'):
                text = element.text
                id = element.find_element_by_name('selected[]').get_attribute('value')
                self.group_cache.append(Group(group_name=text, id=id))
        # Return a copy so callers cannot mutate the cache.
        return list(self.group_cache)
    def select_group_by_index(self, index):
        """Tick the checkbox of the group in row `index`."""
        wd = self.app.wd
        wd.find_elements_by_name("selected[]")[index].click()
    def delete_group_by_index(self, index):
        """Delete the group in row `index` and invalidate the cache."""
        wd = self.app.wd
        self.open_group_page()
        self.select_group_by_index(index)
        wd.find_element_by_name('delete').click()
        self.click_group_page()
        self.group_cache = None
    def edit_group_by_index(self, Group, index):
        """Overwrite fields of the group in row `index` with `Group`'s values.

        NOTE(review): the parameter shadows the module-level Group class.
        """
        wd = self.app.wd
        self.open_group_page()
        wd.find_elements_by_name("selected[]")[index].click()
        wd.find_element_by_name("edit").click()
        self.group_line('group_name', Group.group_name)
        self.group_line('group_header', Group.group_header)
        self.group_line('group_footer', Group.group_footer)
        wd.find_element_by_name("update").click()
        wd.find_element_by_link_text("groups").click()
        self.group_cache = None
|
{
"content_hash": "3baf63a44030641b78298ec5756fcc3a",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 115,
"avg_line_length": 30.736842105263158,
"alnum_prop": 0.583904109589041,
"repo_name": "werbk/task-4.12-and-4.13",
"id": "123986345f52abded53d378dbd01681bbc7fc42f",
"size": "3504",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests_group/group_lib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24922"
}
],
"symlink_target": ""
}
|
"""
Contains the dynamic nested sampler class :class:`DynamicSampler` used to
dynamically allocate nested samples. Note that :class:`DynamicSampler`
implicitly wraps a sampler from :mod:`~dynesty.nestedsamplers`. Also contains
the weight function :meth:`weight_function` and stopping function
:meth:`stopping_function`. These are used by default within
:class:`DynamicSampler` if corresponding functions are not provided
by the user.
"""
import sys
import warnings
import math
import copy
from enum import Enum
import numpy as np
from scipy.special import logsumexp
from .nestedsamplers import (UnitCubeSampler, SingleEllipsoidSampler,
MultiEllipsoidSampler, RadFriendsSampler,
SupFriendsSampler)
from .results import Results
from .utils import (get_seed_sequence, get_print_func, _kld_error,
compute_integrals, IteratorResult, IteratorResultShort,
get_enlarge_bootstrap, RunRecord, get_neff_from_logwt,
DelayTimer, save_sampler, restore_sampler)
__all__ = [
"DynamicSampler",
"weight_function",
"stopping_function",
]
# Map of bound names to the corresponding nested-sampler implementation.
_SAMPLERS = {
    'none': UnitCubeSampler,
    'single': SingleEllipsoidSampler,
    'multi': MultiEllipsoidSampler,
    'balls': RadFriendsSampler,
    'cubes': SupFriendsSampler
}
# Finite stand-in for -inf log-likelihoods so downstream estimators
# never see an actual -inf value.
_LOWL_VAL = -1e300
class DynamicSamplerStatesEnum(Enum):
    """Internal life-cycle states of a dynamic nested sampling run."""
    INIT = 1  # after the constructor
    LIVEPOINTSINIT = 2  # after generating livepoints
    INBASE = 3  # during base runs
    BASE_DONE = 4  # base run done
    INBATCH = 5  # after at least one batch
    BATCH_DONE = 6  # after at least one batch
    INBASEADDLIVE = 7  # during addition of livepoints at the end of the base run
    INBATCHADDLIVE = 8  # during addition of livepoints (presumably at the
    # end of a batch -- original comment was truncated; TODO confirm)
def compute_weights(results):
    """ Derive evidence and posterior weights.
    return two arrays, evidence weights and posterior weights
    """
    logl = results.logl
    logz = results.logz  # final ln(evidence)
    logvol = results.logvol
    logwt = results.logwt
    samples_n = results.samples_n
    # TODO the logic here needs to be verified
    # Estimated upper bound on the total evidence: accumulated evidence
    # plus the remainder contributed by the final likelihood shell.
    logz_remain = logl[-1] + logvol[-1]
    logz_tot = np.logaddexp(logz[-1], logz_remain)
    # ln(remaining evidence) at each iteration, computed as
    # log(exp(logz_tot) - exp(logz)) via a signed logsumexp.
    ones = np.ones_like(logz)
    logzin = logsumexp([ones * logz_tot, logz], axis=0, b=[ones, -ones])
    # Per-sample evidence weight, normalized to sum to one.
    logzweight = logzin - np.log(samples_n)
    logzweight -= logsumexp(logzweight)
    zweight = np.exp(logzweight)
    # Posterior (importance) weights, normalized to sum to one.
    pweight = np.exp(logwt - logz[-1])
    pweight /= np.sum(pweight)
    return zweight, pweight
def weight_function(results, args=None, return_weights=False):
    """
    The default weight function utilized by :class:`DynamicSampler`.
    Zipped parameters are passed to the function via :data:`args`.
    Assigns each point a weight based on a weighted average of the
    posterior and evidence information content::
        weight = pfrac * pweight + (1. - pfrac) * zweight
    where `pfrac` is the fractional importance placed on the posterior,
    the evidence weight `zweight` is based on the estimated remaining
    posterior mass, and the posterior weight `pweight` is the sample's
    importance weight.
    Returns a set of log-likelihood bounds set by the earliest/latest
    samples where `weight > maxfrac * max(weight)`, with additional
    left/right padding based on `pad`.
    Parameters
    ----------
    results : :class:`Results` instance
        :class:`Results` instance.
    args : dictionary of keyword arguments, optional
        Arguments used to set the log-likelihood bounds used for sampling,
        as described above. Default values are `pfrac = 0.8`, `maxfrac = 0.8`,
        and `pad = 1`.
    return_weights : bool, optional
        Whether to return the individual weights (and their components) used
        to compute the log-likelihood bounds. Default is `False`.
    Returns
    -------
    logl_bounds : tuple with shape (2,)
        Log-likelihood bounds `(logl_min, logl_max)` determined by the weights.
    weights : tuple with shape (3,), optional
        The individual weights `(pweight, zweight, weight)` used to determine
        `logl_bounds`.
    """
    # Initialize hyperparameters.
    if args is None:
        args = {}
    pfrac = args.get('pfrac', 0.8)
    if not 0. <= pfrac <= 1.:
        raise ValueError(
            f"The provided `pfrac` {pfrac} is not between 0. and 1.")
    maxfrac = args.get('maxfrac', 0.8)
    if not 0. <= maxfrac <= 1.:
        raise ValueError(
            f"The provided `maxfrac` {maxfrac} is not between 0. and 1.")
    lpad = args.get('pad', 1)
    if lpad < 0:
        raise ValueError(f"`lpad` {lpad} is less than zero.")
    zweight, pweight = compute_weights(results)
    # Compute combined weights.
    weight = (1. - pfrac) * zweight + pfrac * pweight
    # Compute logl bounds
    # we pad by lpad on each side (2lpad total)
    # if this brings us outside the range on on side, I add it on another
    nsamps = len(weight)
    # Indices of the first/last sample exceeding the weight threshold,
    # widened by `lpad` on both sides.
    bounds = np.nonzero(weight > maxfrac * np.max(weight))[0]
    bounds = (bounds[0] - lpad, bounds[-1] + lpad)
    logl = results.logl
    if bounds[1] > nsamps - 1:
        # overflow on the RHS, so we move the left side
        bounds = [bounds[0] - (bounds[1] - (nsamps - 1)), nsamps - 1]
    if bounds[0] < 0:
        # if we overflow on the leftside we set the edge to -inf and expand
        # the RHS
        logl_min = -np.inf
        logl_max = logl[min(bounds[1] - bounds[0], nsamps - 1)]
    else:
        logl_min, logl_max = logl[bounds[0]], logl[bounds[1]]
    # Hitting the final sample means there is no upper constraint.
    if bounds[1] == nsamps - 1:
        logl_max = np.inf
    if return_weights:
        return (logl_min, logl_max), (pweight, zweight, weight)
    else:
        return (logl_min, logl_max)
def _get_update_interval_ratio(update_interval, sample, bound, ndim, nlive,
slices, walks):
"""
Get the update_interval divided by the number of live points
"""
if update_interval is None:
if sample == 'unif':
update_interval_frac = 1.5
elif sample == 'rwalk':
update_interval_frac = 0.15 * walks
elif sample == 'slice':
update_interval_frac = 0.9 * ndim * slices
elif sample == 'rslice':
update_interval_frac = 2.0 * slices
elif sample == 'hslice':
update_interval_frac = 25.0 * slices
else:
update_interval_frac = np.inf
warnings.warn(
"No update_interval set with unknown sampling method: "
f"'{sample}'. Defaulting to no updates.")
elif isinstance(update_interval, float):
update_interval_frac = update_interval
elif isinstance(update_interval, int):
update_interval_frac = update_interval * 1. / nlive
else:
raise RuntimeError(f'Strange update_interval value {update_interval}')
if bound == 'none':
update_interval_frac = np.inf
return update_interval_frac
def stopping_function(results,
                      args=None,
                      rstate=None,
                      M=None,
                      return_vals=False):
    """
    The default stopping function utilized by :class:`DynamicSampler`.
    Zipped parameters are passed to the function via :data:`args`.
    Assigns the run a stopping value based on a weighted average of the
    stopping values for the posterior and evidence::
        stop = pfrac * stop_post + (1.- pfrac) * stop_evid
    The evidence stopping value is based on the estimated evidence error
    (i.e. standard deviation) relative to a given threshold::
        stop_evid = evid_std / evid_thresh
    The posterior stopping value is based on the estimated effective number
    of samples.
        stop_post = target_n_effective / n_effective
    Estimates of the mean and standard deviation are computed using `n_mc`
    realizations of the input using a provided `'error'` keyword (either
    `'jitter'` or `'resample'`, which call related functions :meth:`jitter_run`
    and :meth:`resample_run` in :mod:`dynesty.utils`, respectively.
    Returns the boolean `stop <= 1`. If `True`, the :class:`DynamicSampler`
    will stop adding new samples to our results.
    Parameters
    ----------
    results : :class:`Results` instance
        :class:`Results` instance.
    args : dictionary of keyword arguments, optional
        Arguments used to set the stopping values. Default values are
        `pfrac = 1.0`, `evid_thresh = 0.1`, `target_n_effective = 10000`,
        `n_mc = 0`, `error = 'jitter'`, and `approx = True`.
    rstate : `~numpy.random.Generator`, optional
        `~numpy.random.Generator` instance.
    M : `map` function, optional
        An alias to a `map`-like function. This allows users to pass
        functions from pools (e.g., `pool.map`) to compute realizations in
        parallel. By default the standard `map` function is used.
    return_vals : bool, optional
        Whether to return the stopping value (and its components). Default
        is `False`.
    Returns
    -------
    stop_flag : bool
        Boolean flag indicating whether we have passed the desired stopping
        criteria.
    stop_vals : tuple of shape (3,), optional
        The individual stopping values `(stop_post, stop_evid, stop)` used
        to determine the stopping criteria.
    """
    # Initialize values.
    if args is None:
        args = {}
    if M is None:
        M = map
    # Initialize hyperparameters (with validation).
    pfrac = args.get('pfrac', 1.0)
    if not 0. <= pfrac <= 1.:
        raise ValueError(
            f"The provided `pfrac` {pfrac} is not between 0. and 1.")
    evid_thresh = args.get('evid_thresh', 0.1)
    if pfrac < 1. and evid_thresh < 0.:
        raise ValueError(
            f"The provided `evid_thresh` {evid_thresh} is not non-negative "
            f"even though `pfrac` is {pfrac}.")
    target_n_effective = args.get('target_n_effective', 10000)
    if pfrac > 0. and target_n_effective < 0.:
        raise ValueError(
            f"The provided `target_n_effective` {target_n_effective} " +
            f"is not non-negative even though `pfrac` is {pfrac}")
    n_mc = args.get('n_mc', 0)
    if n_mc < 0:
        raise ValueError(f"The number of realizations {n_mc} must be greater "
                         "or equal to zero.")
    if n_mc > 0 and n_mc < 20:
        warnings.warn("Using a small number of realizations might result in "
                      "excessively noisy stopping value estimates.")
    error = args.get('error', 'jitter')
    if error not in {'jitter', 'resample'}:
        raise ValueError(f"The chosen `'error'` option {error} is not valid.")
    approx = args.get('approx', True)
    if n_mc > 1:
        # Compute realizations of ln(evidence) and the KL divergence.
        rlist = [results for i in range(n_mc)]
        error_list = [error for i in range(n_mc)]
        approx_list = [approx for i in range(n_mc)]
        seeds = get_seed_sequence(rstate, n_mc)
        # NOTE(review): `args` (the kwargs dict) is rebound here to the
        # zipped argument list for `_kld_error`; it is not used as a dict
        # after this point.
        args = zip(rlist, error_list, approx_list, seeds)
        outputs = list(M(_kld_error, args))
        lnz_arr = np.array([res[1].logz[-1] for res in outputs])
        # Evidence stopping value from the spread of the realizations.
        lnz_std = np.std(lnz_arr)
    else:
        # Fall back on the analytic evidence error estimate.
        lnz_std = results.logzerr[-1]
    stop_evid = lnz_std / evid_thresh
    n_effective = get_neff_from_logwt(results.logwt)
    stop_post = target_n_effective / n_effective
    # Effective stopping value.
    stop = pfrac * stop_post + (1. - pfrac) * stop_evid
    if return_vals:
        return stop <= 1., (stop_post, stop_evid, stop)
    else:
        return stop <= 1.
def initialize_live_points(live_points,
                           prior_transform,
                           loglikelihood,
                           M,
                           nlive=None,
                           npdim=None,
                           rstate=None,
                           blob=False,
                           use_pool_ptform=None):
    """
    Initialize the first set of live points before starting the sampling
    Parameters:
    live_points: tuple of arrays or None
        This can be either none or tuple of 3 arrays (u, v, logl), i.e.
        point location in cube coordinates, point location in original
        coordinates, and logl values
    prior_transform: function
    log_likelihood: function
    M: function
        The function supporting parallel calls like M(func, list)
    nlive: int
        Number of live-points
    npdim: int
        Number of dimensions
    rstate: :class: numpy.random.RandomGenerator
    blob: bool
        Whether the log-likelihood results carry auxiliary `blob` payloads.
    use_pool_ptform: bool or None
        The flag to perform prior transform using multiprocessing pool or not
    Returns:
    (live_u, live_v, live_logl, blobs): tuple
        The tuple of arrays.
        The first is in unit cube coordinates.
        The second is in the original coordinates.
        The third are the log-likelihood valuess.
        The fourth are the array of blobs (or None)
    """
    if live_points is None:
        # If no live points are provided, propose them by randomly
        # sampling from the unit cube.
        n_attempts = 100
        for _ in range(n_attempts):
            live_u = rstate.random(size=(nlive, npdim))
            if use_pool_ptform:
                live_v = M(prior_transform, np.asarray(live_u))
            else:
                live_v = map(prior_transform, np.asarray(live_u))
            live_v = np.array(list(live_v))
            live_logl = loglikelihood.map(np.asarray(live_v))
            if blob:
                # Split the returned objects into values and blob payloads.
                live_blobs = np.array([_.blob for _ in live_logl])
            live_logl = np.array([_.val for _ in live_logl])
            # Convert all `-np.inf` log-likelihoods to finite large
            # numbers. Necessary to keep estimators in our sampler from
            # breaking.
            for i, logl in enumerate(live_logl):
                if not np.isfinite(logl):
                    if np.sign(logl) < 0:
                        live_logl[i] = _LOWL_VAL
                    else:
                        # +inf / NaN likelihoods are unrecoverable.
                        raise ValueError(
                            f"The log-likelihood ({logl}) of live "
                            f"point {i} located at u={live_u[i]} "
                            f"v={live_v[i]} is invalid.")
            # Check to make sure there is at least one finite
            # log-likelihood value within the initial set of live
            # points.
            if np.any(live_logl != _LOWL_VAL):
                break
        else:
            # for-else: only reached when no attempt ever broke out.
            # If we found nothing after many attempts, raise the alarm.
            raise RuntimeError(f"After {n_attempts} attempts, not a single "
                               "live "
                               "point had a valid log-likelihood! Please "
                               "check your prior transform and/or "
                               "log-likelihood.")
    else:
        # If live points were provided, convert the log-likelihoods and
        # then run a quick safety check.
        # NOTE(review): the unpacked `blobs` is never assigned to
        # `live_blobs`, so when `blob` is True and live points are
        # supplied, the final return would hit an unbound local -- verify.
        live_u, live_v, live_logl, blobs = live_points
        for i, logl in enumerate(live_logl):
            if not np.isfinite(logl):
                if np.sign(logl) < 0:
                    live_logl[i] = _LOWL_VAL
                else:
                    raise ValueError("The log-likelihood ({0}) of live "
                                     "point {1} located at u={2} v={3} "
                                     " is invalid.".format(
                                         logl, i, live_u[i], live_v[i]))
        # NOTE(review): elementwise comparison assumes `live_logl` is a
        # numpy array here -- confirm against callers.
        if all(live_logl == _LOWL_VAL):
            raise ValueError("Not a single provided live point has a "
                             "valid log-likelihood!")
    if (np.ptp(live_logl) == 0):
        warnings.warn(
            'All the initial likelihood values are the same. '
            'You likely have a plateau in the likelihood. '
            'Nested sampling is *NOT* guaranteed to work in this case',
            RuntimeWarning)
    if not blob:
        live_blobs = None
    return (live_u, live_v, live_logl, live_blobs)
class DynamicSampler:
"""
A dynamic nested sampler that allocates live points adaptively during
a single run according to a specified weight function until a specified
stopping criteria is reached.
Parameters
----------
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
prior_transform : function
Function transforming a sample from the a unit cube to the parameter
space of interest according to the prior.
npdim : int, optional
Number of parameters accepted by `prior_transform`.
bound : {`'none'`, `'single'`, `'multi'`, `'balls'`, `'cubes'`}, optional
Method used to approximately bound the prior using the current
set of live points. Conditions the sampling methods used to
propose new live points.
method : {`'unif'`, `'rwalk'`,
`'slice'`, `'rslice'`, `'hslice'`}, optional
Method used to sample uniformly within the likelihood constraint,
conditioned on the provided bounds.
update_interval : int
Only update the bounding distribution every `update_interval`-th
likelihood call.
first_update : dict
A dictionary containing parameters governing when the sampler should
first update the bounding distribution from the unit cube to the one
specified by the user.
rstate : `~numpy.random.Generator`
`~numpy.random.Generator` instance.
queue_size: int
Carry out likelihood evaluations in parallel by queueing up new live
point proposals using (at most) this many threads/members.
pool: pool
Use this pool of workers to execute operations in parallel.
use_pool : dict
A dictionary containing flags indicating where the provided `pool`
should be used to execute operations in parallel.
ncdim: int
Number of clustered dimensions
nlive0: int
Default number of live points to use
kwargs : dict, optional
A dictionary of additional parameters (described below).
"""
def __init__(self, loglikelihood, prior_transform, npdim, bound, method,
update_interval_ratio, first_update, rstate, queue_size, pool,
use_pool, ncdim, nlive0, kwargs):
# distributions
self.loglikelihood = loglikelihood
self.prior_transform = prior_transform
self.npdim = npdim
self.ncdim = ncdim
self.blob = kwargs.get('blob') or False
# bounding/sampling
self.bounding = bound
self.method = method
self.update_interval_ratio = update_interval_ratio
self.first_update = first_update
# internal sampler object
self.sampler = None
# extra arguments
self.kwargs = kwargs
self.enlarge, self.bootstrap = get_enlarge_bootstrap(
method, kwargs.get('enlarge'), kwargs.get('bootstrap'))
self.walks = self.kwargs.get('walks', 25)
self.slices = self.kwargs.get('slices', 3)
self.cite = self.kwargs.get('cite')
self.custom_update = self.kwargs.get('update_func')
# random state
self.rstate = rstate
# parallelism
self.queue_size = queue_size
self.pool = pool
if self.pool is None:
self.M = map
else:
self.M = pool.map
self.use_pool = use_pool # provided flags for when to use the pool
self.use_pool_ptform = use_pool.get('prior_transform', True)
self.use_pool_logl = use_pool.get('loglikelihood', True)
self.use_pool_evolve = use_pool.get('propose_point', True)
self.use_pool_update = use_pool.get('update_bound', True)
self.use_pool_stopfn = use_pool.get('stop_function', True)
# sampling details
self.it = 1 # number of iterations
self.batch = 0 # number of batches allocated dynamically
self.ncall = 0 # number of function calls
self.bound = [] # initial states used to compute bounds
self.eff = 1. # sampling efficiency
self.base = False # base run complete
self.nlive0 = nlive0
self.internal_state = DynamicSamplerStatesEnum.INIT
self.saved_run = RunRecord(dynamic=True)
self.base_run = RunRecord(dynamic=True)
self.new_run = RunRecord(dynamic=True)
self.new_logl_min, self.new_logl_max = -np.inf, np.inf # logl bounds
# these are set-up during sampling
self.live_u = None
self.live_v = None
self.live_it = None
self.live_bound = None
self.live_logl = None
self.live_init = None
self.nlive_init = None
self.batch_sampler = None
def __setstate__(self, state):
self.__dict__ = state
self.pool = None
self.M = map
def __getstate__(self):
"""Get state information for pickling."""
state = self.__dict__.copy()
# deal with pool
del state['pool'] # remove pool
del state['M'] # remove `pool.map` function hook
return state
def save(self, fname):
"""
Save the state of the dynamic sampler in a file
Parameters
----------
fname: string
Filename of the save file.
"""
save_sampler(self, fname)
@staticmethod
def restore(fname, pool=None):
"""
Restore the dynamic sampler from a file.
It is assumed that the file was created using .save() method
of DynamicNestedSampler or as a result of checkpointing during
run_nested()
Parameters
----------
fname: string
Filename of the save file.
pool: object(optional)
The multiprocessing pool-like object that supports map()
calls that will be used in the restored object.
"""
return restore_sampler(fname, pool=pool)
def __get_update_interval(self, update_interval, nlive):
if not isinstance(update_interval, int):
if isinstance(update_interval, float):
cur_update_interval_ratio = update_interval
elif update_interval is None:
cur_update_interval_ratio = self.update_interval_ratio
else:
raise RuntimeError(
str.format('Weird update_interval value {}',
update_interval))
update_interval = int(
max(
min(np.round(cur_update_interval_ratio * nlive),
sys.maxsize), 1))
return update_interval
def reset(self):
"""Re-initialize the sampler."""
# sampling
self.it = 1
self.batch = 0
self.ncall = 0
self.bound = []
self.eff = 1.
self.base = False
self.saved_run = RunRecord(dynamic=True)
self.base_run = RunRecord(dynamic=True)
self.new_run = RunRecord(dynamic=True)
self.new_logl_min, self.new_logl_max = -np.inf, np.inf
@property
def results(self):
"""Saved results from the dynamic nested sampling run. All saved
bounds are also returned."""
d = {}
for k in [
'nc', 'v', 'id', 'batch', 'it', 'u', 'n', 'logwt', 'logl',
'logvol', 'logz', 'logzvar', 'h', 'batch_nlive',
'batch_bounds', 'blob'
]:
d[k] = np.array(self.saved_run[k])
# Add all saved samples (and ancillary quantities) to the results.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
results = [('niter', self.it - 1), ('ncall', d['nc']),
('eff', self.eff), ('samples', d['v'])]
for k in ['id', 'batch', 'it', 'u', 'n']:
results.append(('samples_' + k, d[k]))
for k in [
'logwt', 'logl', 'logvol', 'logz', 'batch_nlive',
'batch_bounds', 'blob'
]:
results.append((k, d[k]))
results.append(('logzerr', np.sqrt(d['logzvar'])))
results.append(('information', d['h']))
# Add any saved bounds (and ancillary quantities) to the results.
if self.sampler.save_bounds:
results.append(('bound', copy.deepcopy(self.bound)))
results.append(
('bound_iter', np.array(self.saved_run['bounditer'])))
results.append(
('samples_bound', np.array(self.saved_run['boundidx'])))
results.append(('scale', np.array(self.saved_run['scale'])))
return Results(results)
@property
def n_effective(self):
"""
Estimate the effective number of posterior samples using the Kish
Effective Sample Size (ESS) where `ESS = sum(wts)^2 / sum(wts^2)`.
Note that this is `len(wts)` when `wts` are uniform and
`1` if there is only one non-zero element in `wts`.
"""
logwt = self.saved_run['logwt']
if len(logwt) == 0 or np.isneginf(np.max(logwt)):
# If there are no saved weights, or its -inf return 0.
return 0
else:
return get_neff_from_logwt(np.asarray(logwt))
    @property
    def citations(self):
        """
        Return list of papers that should be cited given the specified
        configuration of the sampler.
        """
        # Populated at initialization from the 'cite' keyword argument.
        return self.cite
    def sample_initial(self,
                       nlive=None,
                       update_interval=None,
                       first_update=None,
                       maxiter=None,
                       maxcall=None,
                       logl_max=np.inf,
                       dlogz=0.01,
                       n_effective=np.inf,
                       live_points=None,
                       save_samples=False,
                       resume=False):
        """
        Generate a series of initial samples from a nested sampling
        run using a fixed number of live points using an internal
        sampler from :mod:`~dynesty.nestedsamplers`. Instantiates a
        generator that will be called by the user.

        Parameters
        ----------
        nlive : int, optional
            The number of live points to use for the baseline nested
            sampling run. Default is either nlive0 parameter of 500
        update_interval : int or float, optional
            If an integer is passed, only update the bounding distribution
            every `update_interval`-th likelihood call. If a float is passed,
            update the bound after every `round(update_interval * nlive)`-th
            likelihood call. Larger update intervals can be more efficient
            when the likelihood function is quick to evaluate. If no value is
            provided, defaults to the value passed during initialization.
        first_update : dict, optional
            A dictionary containing parameters governing when the sampler will
            first update the bounding distribution from the unit cube
            (`'none'`) to the one specified by `sample`.
        maxiter : int, optional
            Maximum number of iterations. Iteration may stop earlier if the
            termination condition is reached. Default is `sys.maxsize`
            (no limit).
        maxcall : int, optional
            Maximum number of likelihood evaluations. Iteration may stop
            earlier if termination condition is reached. Default is
            `sys.maxsize` (no limit).
        dlogz : float, optional
            Iteration will stop when the estimated contribution of the
            remaining prior volume to the total evidence falls below
            this threshold. Explicitly, the stopping criterion is
            `ln(z + z_est) - ln(z) < dlogz`, where `z` is the current
            evidence from all saved samples and `z_est` is the estimated
            contribution from the remaining volume. The default is
            `0.01`.
        logl_max : float, optional
            Iteration will stop when the sampled ln(likelihood) exceeds the
            threshold set by `logl_max`. Default is no bound (`np.inf`).
        n_effective: int, optional
            This option is deprecated and will be removed in a future release.
        live_points : list of 3 `~numpy.ndarray` each with shape (nlive, ndim)
            A set of live points used to initialize the nested sampling run.
            Contains `live_u`, the coordinates on the unit cube, `live_v`, the
            transformed variables, and `live_logl`, the associated
            loglikelihoods. By default, if these are not provided the initial
            set of live points will be drawn from the unit `npdim`-cube.
            **WARNING: It is crucial that the initial set of live points have
            been sampled from the prior. Failure to provide a set of valid
            live points will lead to incorrect results.**
        save_samples : bool, optional
            Forwarded to the internal sampler's `sample()` call; the results
            yielded here are always recorded by this generator itself.
            Default is `False`.
        resume: bool, optional
            If `True`, skip re-initialization of the live points and of the
            internal sampler and continue a previously interrupted baseline
            run. Default is `False`.

        Returns
        -------
        worst : int
            Index of the live point with the worst likelihood. This is our
            new dead point sample.
        ustar : `~numpy.ndarray` with shape (npdim,)
            Position of the sample.
        vstar : `~numpy.ndarray` with shape (ndim,)
            Transformed position of the sample.
        loglstar : float
            Ln(likelihood) of the sample.
        logvol : float
            Ln(prior volume) within the sample.
        logwt : float
            Ln(weight) of the sample.
        logz : float
            Cumulative ln(evidence) up to the sample (inclusive).
        logzvar : float
            Estimated cumulative variance on `logz` (inclusive).
        h : float
            Cumulative information up to the sample (inclusive).
        nc : int
            Number of likelihood calls performed before the new
            live point was accepted.
        worst_it : int
            Iteration when the live (now dead) point was originally proposed.
        boundidx : int
            Index of the bound the dead point was originally drawn from.
        bounditer : int
            Index of the bound being used at the current iteration.
        eff : float
            The cumulative sampling efficiency (in percent).
        delta_logz : float
            The estimated remaining evidence expressed as the ln(ratio) of the
            current evidence.
        """
        # Check for deprecated options
        if n_effective is not np.inf:
            with warnings.catch_warnings():
                warnings.filterwarnings("once")
                warnings.warn(
                    "The n_effective option to DynamicSampler.sample_initial "
                    "is deprecated and will be removed in future releases",
                    DeprecationWarning)
        # Initialize inputs.
        if maxcall is None:
            maxcall = sys.maxsize
        if maxiter is None:
            maxiter = sys.maxsize
        # Fall back to the live-point count chosen at initialization.
        nlive = nlive or self.nlive0
        # Convert a ratio/None specification into an integer call interval.
        update_interval = self.__get_update_interval(update_interval, nlive)
        if nlive <= 2 * self.ncdim:
            warnings.warn("Beware: `nlive_init <= 2 * ndim`!")
        if not resume:
            # Reset saved results to avoid any possible conflicts.
            self.reset()
            (self.live_u, self.live_v, self.live_logl,
             blobs) = initialize_live_points(
                 live_points,
                 self.prior_transform,
                 self.loglikelihood,
                 self.M,
                 nlive=nlive,
                 npdim=self.npdim,
                 rstate=self.rstate,
                 blob=self.blob,
                 use_pool_ptform=self.use_pool_ptform)
            if self.blob:
                self.live_blobs = blobs
            else:
                self.live_blobs = None
            self.nlive_init = len(self.live_u)
            # (Re-)bundle live points.
            live_points = [
                self.live_u, self.live_v, self.live_logl, self.live_blobs
            ]
            self.live_init = [np.array(_) for _ in live_points]
            # Evaluating the initial live points costs one call each.
            self.ncall += self.nlive_init
            self.live_bound = np.zeros(self.nlive_init, dtype=int)
            self.live_it = np.zeros(self.nlive_init, dtype=int)
            bounding = self.bounding
            if first_update is None:
                first_update = self.first_update
            self.sampler = _SAMPLERS[bounding](self.loglikelihood,
                                               self.prior_transform,
                                               self.npdim,
                                               self.live_init,
                                               self.method,
                                               update_interval,
                                               first_update,
                                               self.rstate,
                                               self.queue_size,
                                               self.pool,
                                               self.use_pool,
                                               ncdim=self.ncdim,
                                               kwargs=self.kwargs,
                                               blob=self.blob)
            self.bound = self.sampler.bound
        self.internal_state = DynamicSamplerStatesEnum.LIVEPOINTSINIT
        # Run the sampler internally as a generator.
        for it, results in enumerate(
                self.sampler.sample(maxiter=maxiter,
                                    save_samples=save_samples,
                                    maxcall=maxcall,
                                    dlogz=dlogz)):
            # Grab results.
            # Save our base run (which we will use later).
            add_info = dict(id=results.worst,
                            u=results.ustar,
                            v=results.vstar,
                            logl=results.loglstar,
                            logvol=results.logvol,
                            logwt=results.logwt,
                            logz=results.logz,
                            logzvar=results.logzvar,
                            h=results.h,
                            nc=results.nc,
                            it=results.worst_it,
                            n=self.nlive_init,
                            blob=results.blob,
                            boundidx=results.boundidx,
                            bounditer=results.bounditer,
                            scale=self.sampler.scale)
            self.base_run.append(add_info)
            self.saved_run.append(add_info)
            # Increment relevant counters.
            self.ncall += results.nc
            self.eff = 100. * self.it / self.ncall
            self.it += 1
            self.internal_state = DynamicSamplerStatesEnum.INBASE
            yield IteratorResult(worst=results.worst,
                                 ustar=results.ustar,
                                 vstar=results.vstar,
                                 loglstar=results.loglstar,
                                 logvol=results.logvol,
                                 logwt=results.logwt,
                                 logz=results.logz,
                                 logzvar=results.logzvar,
                                 h=results.h,
                                 nc=results.nc,
                                 blob=results.blob,
                                 worst_it=results.worst_it,
                                 boundidx=results.boundidx,
                                 bounditer=results.bounditer,
                                 eff=self.eff,
                                 delta_logz=results.delta_logz)
        self.internal_state = DynamicSamplerStatesEnum.INBASEADDLIVE
        # Flush the remaining live points as dead points, as if the run
        # had terminated normally.
        for it, results in enumerate(self.sampler.add_live_points()):
            # Grab results.
            add_info = dict(id=results.worst,
                            u=results.ustar,
                            v=results.vstar,
                            logl=results.loglstar,
                            logvol=results.logvol,
                            logwt=results.logwt,
                            logz=results.logz,
                            logzvar=results.logzvar,
                            h=results.h,
                            blob=results.blob,
                            nc=results.nc,
                            it=results.worst_it,
                            n=self.nlive_init - it,
                            boundidx=results.boundidx,
                            bounditer=results.bounditer,
                            scale=self.sampler.scale)
            self.base_run.append(add_info)
            self.saved_run.append(add_info)
            # Increment relevant counters.
            self.eff = 100. * self.it / self.ncall
            self.it += 1
            yield IteratorResult(worst=results.worst,
                                 ustar=results.ustar,
                                 vstar=results.vstar,
                                 loglstar=results.loglstar,
                                 logvol=results.logvol,
                                 logwt=results.logwt,
                                 logz=results.logz,
                                 logzvar=results.logzvar,
                                 h=results.h,
                                 blob=results.blob,
                                 nc=results.nc,
                                 worst_it=results.worst_it,
                                 boundidx=results.boundidx,
                                 bounditer=results.bounditer,
                                 eff=self.eff,
                                 delta_logz=results.delta_logz)
        # Recompute logwt/logz/logzvar/h from the full set of saved
        # samples and store the results in both the saved and base runs.
        new_vals = {}
        (new_vals['logwt'], new_vals['logz'], new_vals['logzvar'],
         new_vals['h']) = compute_integrals(logl=self.saved_run['logl'],
                                            logvol=self.saved_run['logvol'])
        for curk in ['logwt', 'logz', 'logzvar', 'h']:
            self.saved_run[curk] = new_vals[curk].tolist()
            self.base_run[curk] = new_vals[curk].tolist()
        self.base = True  # baseline run complete
        self.saved_run['batch'] = np.zeros(len(self.saved_run['id']),
                                           dtype=int)  # batch
        self.saved_run['batch_nlive'].append(self.nlive_init)  # initial nlive
        self.saved_run['batch_bounds'].append(
            (-np.inf, np.inf))  # initial bounds
        self.internal_state = DynamicSamplerStatesEnum.BASE_DONE
    def sample_batch(self,
                     dlogz=0.01,
                     nlive_new=None,
                     update_interval=None,
                     logl_bounds=None,
                     maxiter=None,
                     maxcall=None,
                     save_bounds=True,
                     resume=False):
        """
        Generate an additional series of nested samples that will be combined
        with the previous set of dead points. Works by hacking the internal
        `sampler` object.
        Instantiates a generator that will be called by the user.

        Parameters
        ----------
        nlive_new : int
            Number of new live points to be added. Default is the number
            provided at initialization (`nlive0`).
        update_interval : int or float, optional
            If an integer is passed, only update the bounding distribution
            every `update_interval`-th likelihood call. If a float is passed,
            update the bound after every `round(update_interval * nlive)`-th
            likelihood call. Larger update intervals can be more efficient
            when the likelihood function is quick to evaluate. If no value is
            provided, defaults to the value passed during initialization.
        logl_bounds : tuple of size (2,), optional
            The ln(likelihood) bounds used to bracket the run. If `None`,
            the default bounds span the entire range covered by the
            original run.
        maxiter : int, optional
            Maximum number of iterations. Iteration may stop earlier if the
            termination condition is reached. Default is `sys.maxsize`
            (no limit).
        maxcall : int, optional
            Maximum number of likelihood evaluations. Iteration may stop
            earlier if termination condition is reached. Default is
            `sys.maxsize` (no limit).
        save_bounds : bool, optional
            Whether or not to save past distributions used to bound
            the live points internally. Default is `True`.
        dlogz : float, optional
            The stopping point in terms of remaining delta(logz)
        resume: bool, optional
            If `True`, continue an interrupted batch using the internally
            stored `batch_sampler` rather than initializing a new one.
            Default is `False`.

        Returns
        -------
        worst : int
            Index of the live point with the worst likelihood. This is our
            new dead point sample. **Negative values indicate the index
            of a new live point generated when initializing a new batch.**
        ustar : `~numpy.ndarray` with shape (npdim,)
            Position of the sample.
        vstar : `~numpy.ndarray` with shape (ndim,)
            Transformed position of the sample.
        loglstar : float
            Ln(likelihood) of the sample.
        nc : int
            Number of likelihood calls performed before the new
            live point was accepted.
        worst_it : int
            Iteration when the live (now dead) point was originally proposed.
        boundidx : int
            Index of the bound the dead point was originally drawn from.
        bounditer : int
            Index of the bound being used at the current iteration.
        eff : float
            The cumulative sampling efficiency (in percent).
        """
        # Initialize default values.
        maxcall = maxcall or sys.maxsize
        maxiter = maxiter or sys.maxsize
        maxiter_left = maxiter  # how many iterations we have left
        nlive_new = nlive_new or self.nlive0
        if nlive_new <= 2 * self.ncdim:
            warnings.warn("Beware: `nlive_batch <= 2 * ndim`!")
        # Grab results from saved run.
        saved_u = np.array(self.saved_run['u'])
        saved_v = np.array(self.saved_run['v'])
        saved_logl = np.array(self.saved_run['logl'])
        saved_logvol = np.array(self.saved_run['logvol'])
        saved_scale = np.array(self.saved_run['scale'])
        saved_blobs = np.array(self.saved_run['blob'])
        nblive = self.nlive_init
        update_interval = self.__get_update_interval(update_interval,
                                                     nlive_new)
        if not resume:
            first_points = []
            # This will be a list of first points yielded from
            # this batch before we start proper sampling
            batch_sampler = _SAMPLERS[self.bounding](self.loglikelihood,
                                                     self.prior_transform,
                                                     self.npdim,
                                                     self.live_init,
                                                     self.method,
                                                     update_interval,
                                                     self.first_update,
                                                     self.rstate,
                                                     self.queue_size,
                                                     self.pool,
                                                     self.use_pool,
                                                     ncdim=self.ncdim,
                                                     kwargs=self.kwargs,
                                                     blob=self.blob)
            # Stored on the object so an interrupted batch can be resumed.
            self.batch_sampler = batch_sampler
            batch_sampler.save_bounds = save_bounds
            # Reset "new" results.
            self.new_run = RunRecord(dynamic=True)
            # Initialize ln(likelihood) bounds.
            if logl_bounds is None:
                # Default upper bound: highest logl excluding the final
                # nblive points (the baseline run's flushed live points).
                logl_min, logl_max = -np.inf, max(saved_logl[:-nblive])
            else:
                logl_min, logl_max = logl_bounds
            self.new_logl_min, self.new_logl_max = logl_min, logl_max
            # Check whether the lower bound encompasses all previous saved
            # samples.
            psel = np.all(logl_min <= saved_logl)
            if psel:
                # If the lower bound encompasses all saved samples, we want
                # to propose a new set of points from the unit cube.
                live_u, live_v, live_logl, live_blobs = initialize_live_points(
                    None,
                    self.prior_transform,
                    self.loglikelihood,
                    self.M,
                    nlive=nlive_new,
                    npdim=self.npdim,
                    rstate=self.rstate,
                    blob=self.blob,
                    use_pool_ptform=self.use_pool_ptform)
                live_bound = np.zeros(nlive_new, dtype=int)
                live_it = np.zeros(nlive_new, dtype=int) + self.it
                live_nc = np.ones(nlive_new, dtype=int)
                self.ncall += nlive_new
                # Return live points in generator format.
                for i in range(nlive_new):
                    if self.blob:
                        curblob = live_blobs[i]
                    else:
                        curblob = None
                    first_points.append(
                        IteratorResultShort(worst=-i - 1,
                                            ustar=live_u[i],
                                            vstar=live_v[i],
                                            loglstar=live_logl[i],
                                            nc=live_nc[i],
                                            worst_it=live_it[i],
                                            blob=curblob,
                                            boundidx=0,
                                            bounditer=0,
                                            eff=self.eff))
            else:
                # If the lower bound doesn't encompass all base samples,
                # we need to create a uniform sample from the prior subject
                # to the likelihood boundary constraint
                subset0 = np.nonzero(saved_logl > logl_min)[0]
                if len(subset0) == 0:
                    raise RuntimeError(
                        'Could not find live points in the '
                        'required logl interval. Please report!\n'
                        f'Diagnostics. logl_min: {logl_min} '
                        f'logl_bounds: {logl_bounds} '
                        f'saved_loglmax: {saved_logl.max()}')
                # Also if we don't have enough live points above the boundary
                # we simply go down to collect our nblive points
                if len(subset0) < nblive:
                    if subset0[-1] < nblive:
                        # It means we don't even have nblive points
                        # in our base runs so we just take everything
                        subset0 = np.arange(len(saved_logl))
                    else:
                        # otherwise we just move the boundary down
                        # to collect our nblive points
                        subset0 = np.arange(subset0[-1] - nblive + 1,
                                            subset0[-1] + 1)
                # IMPORTANT We have to update the lower bound for sampling
                # otherwise some of our live points do not satisfy it
                logl_min = saved_logl[subset0[0]]
                self.new_logl_min = logl_min
                live_scale = saved_scale[subset0[0]]
                # set the scale based on the lowest point
                # we are weighting each point by X_i to ensure
                # uniformyish sampling within boundary volume
                # It doesn't have to be super uniform as we'll sample
                # again, but still
                cur_log_uniwt = saved_logvol[subset0]
                cur_uniwt = np.exp(cur_log_uniwt - cur_log_uniwt.max())
                cur_uniwt = cur_uniwt / cur_uniwt.sum()
                # I normalize in linear space rather then using logsumexp
                # because cur_uniwt.sum() needs to be 1 for random.choice
                # we are now randomly sampling with weights
                # notice that since we are sampling without
                # replacement we aren't guaranteed to be able
                # to get nblive points
                # so we get min(nblive,subset.sum())
                # in that case the sample technically won't be
                # uniform
                n_pos_weight = (cur_uniwt > 0).sum()
                subset = self.rstate.choice(subset0,
                                            size=min(nblive, n_pos_weight),
                                            p=cur_uniwt,
                                            replace=False)
                # subset will now have indices of selected points from
                # saved_* arrays
                cur_nblive = len(subset)
                if cur_nblive == 1:
                    raise RuntimeError('Only one live point is selected\n' +
                                       'Please report the error on github!' +
                                       f'Diagnostics nblive: {nblive} ' +
                                       f'cur_nblive: {cur_nblive}' +
                                       f'n_pos_weight: {n_pos_weight}' +
                                       f'cur_wt: {cur_uniwt}')
                # We are doing copies here, because live_* stuff is
                # updated in place
                live_u = saved_u[subset, :].copy()
                live_v = saved_v[subset, :].copy()
                live_logl = saved_logl[subset].copy()
                live_blobs = saved_blobs[subset].copy()
                # Hack the internal sampler by overwriting the live points
                # and scale factor.
                batch_sampler.nlive = cur_nblive
                batch_sampler.live_u = live_u
                batch_sampler.live_v = live_v
                batch_sampler.live_logl = live_logl
                batch_sampler.scale = live_scale
                batch_sampler.live_blobs = live_blobs
                # Trigger an update of the internal bounding distribution based
                # on the "new" set of live points.
                bound = batch_sampler.update()
                if save_bounds:
                    batch_sampler.bound.append(copy.deepcopy(bound))
                batch_sampler.nbound += 1
                batch_sampler.since_update = 0
                batch_sampler.logl_first_update = logl_min
                # Sample a new batch of `nlive_new` live points using the
                # internal sampler given the `logl_min` constraint.
                live_u = np.empty((nlive_new, self.npdim))
                live_v = np.empty((nlive_new, saved_v.shape[1]))
                live_logl = np.empty(nlive_new)
                live_bound = np.zeros(nlive_new, dtype=int)
                live_it = np.empty(nlive_new, dtype=int)
                live_nc = np.empty(nlive_new, dtype=int)
                if self.blob:
                    live_blobs = []
                else:
                    live_blobs = None
                for i in range(nlive_new):
                    newpt = batch_sampler._new_point(logl_min)
                    (live_u[i], live_v[i], live_logl[i], live_nc[i]) = newpt
                    if self.blob:
                        # NOTE(review): newpt[2] (the logl entry) appears to
                        # be an object carrying a `.blob` attribute — confirm
                        # against `_new_point`'s return type.
                        blob = newpt[2].blob
                        live_blobs.append(blob)
                    else:
                        blob = None
                    live_it[i] = self.it
                    self.ncall += live_nc[i]
                    # Return live points in generator format.
                    first_points.append(
                        IteratorResultShort(worst=-i - 1,
                                            ustar=live_u[i],
                                            vstar=live_v[i],
                                            loglstar=live_logl[i],
                                            blob=blob,
                                            nc=live_nc[i],
                                            worst_it=live_it[i],
                                            boundidx=live_bound[i],
                                            bounditer=live_bound[i],
                                            eff=self.eff))
                maxiter_left -= nlive_new
            # Overwrite the previous set of live points in our internal sampler
            # with the new batch of points we just generated.
            batch_sampler.nlive = nlive_new
            # All the arrays are newly created in this function
            # We don't need to worry about them being parts of other arrays
            batch_sampler.live_u = live_u
            batch_sampler.live_v = live_v
            batch_sampler.live_logl = live_logl
            batch_sampler.live_bound = live_bound
            batch_sampler.live_blobs = live_blobs
            batch_sampler.live_it = live_it
            batch_sampler.it = self.it + 1
            # Trigger an update of the internal bounding distribution (again).
            if not psel:
                bound = batch_sampler.update()
                if save_bounds:
                    batch_sampler.bound.append(copy.deepcopy(bound))
                batch_sampler.nbound += 1
                batch_sampler.since_update = 0
                batch_sampler.logl_first_update = logl_min
            # Copy over bound reference.
            self.bound = batch_sampler.bound
            # Update internal ln(prior volume)-based quantities
            if self.new_logl_min == -np.inf:
                vol_idx = 0
            else:
                vol_idx = np.argmin(
                    np.abs(
                        np.asarray(self.saved_run['logl']) -
                        self.new_logl_min)) + 1
            # truncate information in the saver of the internal sampler
            for k in batch_sampler.saved_run.keys():
                batch_sampler.saved_run[k] = self.saved_run[k][:vol_idx]
            batch_sampler.dlv = math.log((nlive_new + 1.) / nlive_new)
            # Tell the sampler *not* to try and remove the previous addition of
            # live points. All the hacks above make the internal results
            # garbage anyways.
            batch_sampler.added_live = False
            # Run the sampler internally as a generator until we hit
            # the lower likelihood threshold. Afterwards, we add in our
            # remaining live points *as if* we had terminated the run.
            # This allows us to
            # sample past the original bounds "for free".
            batch_sampler.first_points = first_points
            # We save these points in the object to ensure we can
            # resume from an interrupted run
        else:
            # Resuming: reuse the batch sampler stored by a previous call.
            batch_sampler = self.batch_sampler
            logl_min, logl_max = self.new_logl_min, self.new_logl_max
            live_nc = np.zeros(nlive_new, dtype=int)
            first_points = batch_sampler.first_points
            # TODO FIX whether live_nc should be restored
        # Yield (and consume) the initial points collected above.
        for i in range(len(first_points)):
            yield first_points.pop(0)
        iterated_batch = False
        # To identify if the loop below was executed or not
        maxcall_left = maxcall - np.sum(live_nc)
        for it, results in enumerate(
                batch_sampler.sample(dlogz=dlogz,
                                     logl_max=logl_max,
                                     maxiter=maxiter_left,
                                     maxcall=maxcall_left,
                                     save_samples=True,
                                     save_bounds=save_bounds,
                                     resume=resume)):
            # Save results.
            D = dict(id=results.worst,
                     u=results.ustar,
                     v=results.vstar,
                     logl=results.loglstar,
                     nc=results.nc,
                     it=results.worst_it,
                     blob=results.blob,
                     n=nlive_new,
                     boundidx=results.boundidx,
                     bounditer=results.bounditer,
                     scale=batch_sampler.scale)
            self.new_run.append(D)
            # Increment relevant counters.
            self.ncall += results.nc
            self.eff = 100. * self.it / self.ncall
            self.it += 1
            maxiter_left -= 1
            maxcall_left -= results.nc
            iterated_batch = True
            self.internal_state = DynamicSamplerStatesEnum.INBATCH
            yield IteratorResultShort(worst=results.worst,
                                      ustar=results.ustar,
                                      vstar=results.vstar,
                                      loglstar=results.loglstar,
                                      nc=results.nc,
                                      blob=results.blob,
                                      worst_it=results.worst_it,
                                      boundidx=results.boundidx,
                                      bounditer=results.bounditer,
                                      eff=self.eff)
        if iterated_batch and results.loglstar < logl_max and np.isfinite(
                logl_max) and maxiter_left > 0 and maxcall_left > 0:
            warnings.warn('Warning. The maximum likelihood not reached '
                          'in the batch. '
                          'You may not have enough livepoints')
        self.internal_state = DynamicSamplerStatesEnum.INBATCHADDLIVE
        if not iterated_batch and len(batch_sampler.saved_run['logl']) == 0:
            # This is a special case *if* we only sampled the initial
            # livepoints but never did sample after
            batch_sampler.saved_run['logvol'] = [-np.inf]
            batch_sampler.saved_run['logl'] = [logl_min]
            batch_sampler.saved_run['logz'] = [-1e100]
            batch_sampler.saved_run['logzvar'] = [0]
            batch_sampler.saved_run['h'] = [0]
        # Flush the batch's remaining live points as dead points.
        for it, results in enumerate(batch_sampler.add_live_points()):
            # Save results.
            D = dict(id=results.worst,
                     u=results.ustar,
                     v=results.vstar,
                     logl=results.loglstar,
                     nc=live_nc[results.worst],
                     it=results.worst_it,
                     n=nlive_new - it,
                     blob=results.blob,
                     boundidx=results.boundidx,
                     bounditer=results.bounditer,
                     scale=batch_sampler.scale)
            self.new_run.append(D)
            # Increment relevant counters.
            self.eff = 100. * self.it / self.ncall
            self.it += 1
            yield IteratorResultShort(worst=results.worst,
                                      ustar=results.ustar,
                                      vstar=results.vstar,
                                      loglstar=results.loglstar,
                                      nc=live_nc[results.worst],
                                      worst_it=results.worst_it,
                                      blob=results.blob,
                                      boundidx=results.boundidx,
                                      bounditer=results.bounditer,
                                      eff=self.eff)
        # The batch is complete; the stored sampler is no longer needed
        # for resuming.
        del self.batch_sampler
    def combine_runs(self):
        """Merge the most recent run into the previous (combined) run by
        "stepping through" both runs simultaneously.

        Rebuilds `saved_run` from scratch: the dead points of both runs are
        interleaved in order of increasing ln(likelihood), the effective
        number of live points and ln(prior volume) are recomputed at each
        step, and the evidence integrals are recomputed at the end.
        """
        # Make sure we have a run to add.
        if len(self.new_run['id']) == 0:
            raise ValueError("No new samples are currently saved.")
        # Grab results from saved run.
        saved_d = {}
        new_d = {}
        for k in [
                'id', 'u', 'v', 'logl', 'nc', 'boundidx', 'it', 'bounditer',
                'n', 'scale', 'blob'
        ]:
            saved_d[k] = np.array(self.saved_run[k])
            new_d[k] = np.array(self.new_run[k])
        saved_d['batch'] = np.array(self.saved_run['batch'])
        nsaved = len(saved_d['n'])
        # Offset the new ids so they cannot collide with the saved ones.
        new_d['id'] = new_d['id'] + max(saved_d['id']) + 1
        nnew = len(new_d['n'])
        llmin, llmax = self.new_logl_min, self.new_logl_max
        old_batch_bounds = self.saved_run['batch_bounds']
        old_batch_nlive = self.saved_run['batch_nlive']
        # Reset saved results.
        del self.saved_run
        self.saved_run = RunRecord(dynamic=True)
        # Start our counters at the beginning of each set of dead points.
        idx_saved, idx_new = 0, 0  # start of our dead points
        logl_s, logl_n = saved_d['logl'][idx_saved], new_d['logl'][idx_new]
        nlive_s, nlive_n = saved_d['n'][idx_saved], new_d['n'][idx_new]
        # Iteratively walk through both set of samples to simulate
        # a combined run.
        ntot = nsaved + nnew
        logvol = 0.
        for _ in range(ntot):
            if logl_s > self.new_logl_min:
                # If our saved samples are past the lower log-likelihood
                # bound, both runs are now "active" and should be used.
                nlive = nlive_s + nlive_n
            else:
                # If instead our collection of dead points are below
                # the bound, just use our collection of saved samples.
                nlive = nlive_s
            add_info = {}
            # Increment our position along depending on
            # which dead point (saved or new) is worse.
            if logl_s <= logl_n:
                add_info['batch'] = saved_d['batch'][idx_saved]
                add_source = saved_d
                add_idx = int(idx_saved)
                idx_saved += 1
            else:
                # Points from the new run belong to the next batch.
                add_info['batch'] = self.batch + 1
                add_source = new_d
                add_idx = int(idx_new)
                idx_new += 1
            for k in [
                    'id', 'u', 'v', 'logl', 'nc', 'boundidx', 'it',
                    'bounditer', 'scale', 'blob'
            ]:
                add_info[k] = add_source[k][add_idx]
            self.saved_run.append(add_info)
            # Save the number of live points and expected ln(volume).
            logvol -= math.log((nlive + 1.) / nlive)
            self.saved_run['n'].append(nlive)
            self.saved_run['logvol'].append(logvol)
            # Attempt to step along our samples. If we're out of samples,
            # set values to defaults.
            try:
                logl_s = saved_d['logl'][idx_saved]
                nlive_s = saved_d['n'][idx_saved]
            except IndexError:
                # Exhausted the saved run: +inf logl so the new run
                # always wins the comparison from now on.
                logl_s = np.inf
                nlive_s = 0
            try:
                logl_n = new_d['logl'][idx_new]
                nlive_n = new_d['n'][idx_new]
            except IndexError:
                logl_n = np.inf
                nlive_n = 0
        # ensure that we correctly merged
        assert self.saved_run['logl'][0] == min(new_d['logl'][0],
                                                saved_d['logl'][0])
        assert self.saved_run['logl'][-1] == max(new_d['logl'][-1],
                                                 saved_d['logl'][-1])
        # Recompute the evidence integrals over the merged run.
        new_logwt, new_logz, new_logzvar, new_h = compute_integrals(
            logl=self.saved_run['logl'], logvol=self.saved_run['logvol'])
        self.saved_run['logwt'].extend(new_logwt.tolist())
        self.saved_run['logz'].extend(new_logz.tolist())
        self.saved_run['logzvar'].extend(new_logzvar.tolist())
        self.saved_run['h'].extend(new_h.tolist())
        # Reset results.
        self.new_run = RunRecord(dynamic=True)
        self.new_logl_min, self.new_logl_max = -np.inf, np.inf
        # Increment batch counter.
        self.batch += 1
        # Saved batch quantities.
        self.saved_run['batch_nlive'] = old_batch_nlive + [(max(new_d['n']))]
        self.saved_run['batch_bounds'] = old_batch_bounds + [((llmin, llmax))]
def run_nested(self,
nlive_init=None,
maxiter_init=None,
maxcall_init=None,
dlogz_init=0.01,
logl_max_init=np.inf,
n_effective_init=np.inf,
nlive_batch=None,
wt_function=None,
wt_kwargs=None,
maxiter_batch=None,
maxcall_batch=None,
maxiter=None,
maxcall=None,
maxbatch=None,
n_effective=None,
stop_function=None,
stop_kwargs=None,
use_stop=True,
save_bounds=True,
print_progress=True,
print_func=None,
live_points=None,
resume=False,
checkpoint_file=None,
checkpoint_every=60):
"""
**The main dynamic nested sampling loop.** After an initial "baseline"
run using a constant number of live points, dynamically allocates
additional (nested) samples to optimize a specified weight function
until a specified stopping criterion is reached.
Parameters
----------
nlive_init : int, optional
The number of live points used during the initial ("baseline")
nested sampling run. Default is the number provided at
initialization
maxiter_init : int, optional
Maximum number of iterations for the initial baseline nested
sampling run. Iteration may stop earlier if the
termination condition is reached. Default is `sys.maxsize`
(no limit).
maxcall_init : int, optional
Maximum number of likelihood evaluations for the initial
baseline nested sampling run. Iteration may stop earlier
if the termination condition is reached. Default is `sys.maxsize`
(no limit).
dlogz_init : float, optional
The baseline run will stop when the estimated contribution of the
remaining prior volume to the total evidence falls below
this threshold. Explicitly, the stopping criterion is
`ln(z + z_est) - ln(z) < dlogz`, where `z` is the current
evidence from all saved samples and `z_est` is the estimated
contribution from the remaining volume. The default is
`0.01`.
logl_max_init : float, optional
The baseline run will stop when the sampled ln(likelihood) exceeds
this threshold. Default is no bound (`np.inf`).
n_effective_init: int, optional
Minimum number of effective posterior samples needed during the
baseline run. If the estimated "effective sample size" (ESS)
exceeds this number, sampling will terminate.
Default is no ESS (`np.inf`).
This option is deprecated and will be removed in a future release.
nlive_batch : int, optional
The number of live points used when adding additional samples
from a nested sampling run within each batch. Default is the
number provided at init
wt_function : func, optional
A cost function that takes a :class:`Results` instance
and returns a log-likelihood range over which a new batch of
samples should be generated. The default function simply
computes a weighted average of the posterior and evidence
information content as::
weight = pfrac * pweight + (1. - pfrac) * zweight
wt_kwargs : dict, optional
Extra arguments to be passed to the weight function.
maxiter_batch : int, optional
Maximum number of iterations for the nested
sampling run within each batch. Iteration may stop earlier
if the termination condition is reached. Default is `sys.maxsize`
(no limit).
maxcall_batch : int, optional
Maximum number of likelihood evaluations for the nested
sampling run within each batch. Iteration may stop earlier
if the termination condition is reached. Default is `sys.maxsize`
(no limit).
maxiter : int, optional
Maximum number of iterations allowed. Default is `sys.maxsize`
(no limit).
maxcall : int, optional
Maximum number of likelihood evaluations allowed.
Default is `sys.maxsize` (no limit).
maxbatch : int, optional
Maximum number of batches allowed. Default is `sys.maxsize`
(no limit).
n_effective: int, optional
Minimum number of effective posterior samples needed during the
entire run. If the estimated "effective sample size" (ESS)
exceeds this number, sampling will terminate.
Default is max(10000, ndim^2)
stop_function : func, optional
A function that takes a :class:`Results` instance and
returns a boolean indicating that we should terminate the run
because we've collected enough samples.
stop_kwargs : float, optional
Extra arguments to be passed to the stopping function.
use_stop : bool, optional
Whether to evaluate our stopping function after each batch.
Disabling this can improve performance if other stopping criteria
such as :data:`maxcall` are already specified. Default is `True`.
save_bounds : bool, optional
Whether or not to save distributions used to bound
the live points internally during dynamic live point allocation.
Default is `True`.
print_progress : bool, optional
Whether to output a simple summary of the current run that
updates each iteration. Default is `True`.
print_func : function, optional
A function that prints out the current state of the sampler.
If not provided, the default :meth:`results.print_fn` is used.
live_points : list of 3 `~numpy.ndarray` each with shape (nlive, ndim)
A set of live points used to initialize the nested sampling run.
Contains `live_u`, the coordinates on the unit cube, `live_v`, the
transformed variables, and `live_logl`, the associated
loglikelihoods. By default, if these are not provided the initial
set of live points will be drawn from the unit `npdim`-cube.
**WARNING: It is crucial that the initial set of live points have
been sampled from the prior. Failure to provide a set of valid
live points will result in biased results.**
resume: bool, optional
If resume is set to true, we will try to resume a previously
interrupted run
checkpoint_file: string, optional
if not None The state of the sampler will be saved into this
file every checkpoint_every seconds
checkpoint_every: float, optional
The number of seconds between checkpoints that will save
the internal state of the sampler
"""
# Check for deprecated options
if n_effective_init is not np.inf:
with warnings.catch_warnings():
warnings.filterwarnings("once")
warnings.warn(
"The n_effective_init option to DynamicSampler.run_nested "
"is deprecated and will be removed in future releases",
DeprecationWarning)
# Initialize values.
if maxcall is None:
maxcall = sys.maxsize
if maxiter is None:
maxiter = sys.maxsize
if maxiter_batch is None:
maxiter_batch = sys.maxsize
if maxcall_batch is None:
maxcall_batch = sys.maxsize
if maxbatch is None:
maxbatch = sys.maxsize
if maxiter_init is None:
maxiter_init = sys.maxsize
if maxcall_init is None:
maxcall_init = sys.maxsize
if wt_function is None:
wt_function = weight_function
if wt_kwargs is None:
wt_kwargs = {}
if stop_function is None:
default_stop_function = True
stop_function = stopping_function
else:
default_stop_function = False
if stop_kwargs is None:
stop_kwargs = {}
if default_stop_function:
if n_effective is None:
# The reason to scale with square of number of
# dimensions is because the number coefficients
# defining covariance is roughly 0.5 * N^2
n_effective = max(self.npdim * self.npdim, 10000)
stop_kwargs['target_n_effective'] = n_effective
nlive_init = nlive_init or self.nlive0
nlive_batch = nlive_batch or self.nlive0
# Run the main dynamic nested sampling loop.
ncall = self.ncall
niter = self.it - 1
logl_bounds = (-np.inf, np.inf)
maxcall_init = min(maxcall_init, maxcall) # set max calls
maxiter_init = min(maxiter_init, maxiter) # set max iterations
# Baseline run.
pbar, print_func = get_print_func(print_func, print_progress)
timer = DelayTimer(checkpoint_every)
try:
if not self.base:
for results in self.sample_initial(
nlive=nlive_init,
dlogz=dlogz_init,
maxcall=maxcall_init,
maxiter=maxiter_init,
logl_max=logl_max_init,
live_points=live_points,
n_effective=n_effective_init,
resume=resume,
save_samples=True):
if resume:
resume = False
ncall += results.nc
niter += 1
if (checkpoint_file is not None and self.internal_state !=
DynamicSamplerStatesEnum.INBASEADDLIVE
and timer.is_time()):
self.save(checkpoint_file)
# Print progress.
if print_progress:
print_func(results,
niter,
ncall,
nbatch=0,
dlogz=dlogz_init,
logl_max=logl_max_init)
for n in range(self.batch, maxbatch):
# Update stopping criteria.
res = self.results
mcall = min(maxcall - ncall, maxcall_batch)
miter = min(maxiter - niter, maxiter_batch)
if mcall > 0 and miter > 0 and use_stop:
if self.use_pool_stopfn:
M = self.M
else:
M = map
stop, stop_vals = stop_function(res,
stop_kwargs,
rstate=self.rstate,
M=M,
return_vals=True)
stop_val = stop_vals[2]
else:
stop = False
stop_val = np.nan
# If we have likelihood calls remaining, iterations remaining,
# and we have failed to hit the minimum ESS, run our batch.
if mcall > 0 and miter > 0 and not stop:
# Compute our sampling bounds using the provided
# weight function.
passback = self.add_batch(
nlive=nlive_batch,
wt_function=wt_function,
wt_kwargs=wt_kwargs,
maxiter=miter,
maxcall=mcall,
save_bounds=save_bounds,
print_progress=print_progress,
print_func=print_func,
stop_val=stop_val,
resume=resume,
checkpoint_file=checkpoint_file,
checkpoint_every=checkpoint_every)
if resume:
# The assumption here is after the first resume
# iteration we will proceed as normal
resume = False
ncall, niter, logl_bounds, results = passback
elif logl_bounds[1] != np.inf:
# We ran at least one batch and now we're done!
if print_progress:
print_func(results,
niter,
ncall,
nbatch=n,
stop_val=stop_val,
logl_min=logl_bounds[0],
logl_max=logl_bounds[1])
break
else:
# We didn't run a single batch but now we're done!
break
finally:
if pbar is not None:
pbar.close()
self.loglikelihood.history_save()
def add_batch(self,
              nlive=500,
              dlogz=1e-2,
              mode='weight',
              wt_function=None,
              wt_kwargs=None,
              maxiter=None,
              maxcall=None,
              logl_bounds=None,
              save_bounds=True,
              print_progress=True,
              print_func=None,
              stop_val=None,
              resume=False,
              checkpoint_file=None,
              checkpoint_every=60):
        """
        Allocate an additional batch of (nested) samples based on
        the combined set of previous samples using the specified
        weight function.
        Parameters
        ----------
        nlive : int, optional
            The number of live points used when adding additional samples
            in the batch. Default is `500`.
        dlogz : float, optional
            Evidence stopping threshold forwarded to the batch sampler.
            Default is `0.01`.
        mode: string, optional
            How to allocate a new batch.
            The possible values are 'auto', 'weight', 'full', 'manual'
            'weight' means to use the weight_function to decide the optimal
            logl range.
            'full' means sample the whole posterior again
            'auto' means choose automatically, which currently means using
            'weight'
            'manual' means that logl_bounds need to be explicitly specified
        wt_function : func, optional
            A cost function that takes a `Results` instance
            and returns a log-likelihood range over which a new batch of
            samples should be generated. The default function simply
            computes a weighted average of the posterior and evidence
            information content as::
                weight = pfrac * pweight + (1. - pfrac) * zweight
        wt_kwargs : dict, optional
            Extra arguments to be passed to the weight function.
        maxiter : int, optional
            Maximum number of iterations allowed. Default is `sys.maxsize`
            (no limit).
        maxcall : int, optional
            Maximum number of likelihood evaluations allowed.
            Default is `sys.maxsize` (no limit).
        logl_bounds : tuple of size (2,), optional
            The ln(likelihood) bounds used to bracket the run. If `None`,
            the provided `wt_function` will be used to determine the bounds
            (this is the default behavior).
        save_bounds : bool, optional
            Whether or not to save distributions used to bound
            the live points internally during dynamic live point allocations.
            Default is `True`.
        print_progress : bool, optional
            Whether to output a simple summary of the current run that
            updates each iteration. Default is `True`.
        print_func : function, optional
            A function that prints out the current state of the sampler.
            If not provided, the default :meth:`results.print_fn` is used.
        stop_val : float, optional
            The value of the stopping criteria to be passed to
            :meth:`print_func`. Used internally within :meth:`run_nested` to
            keep track of progress.
        resume: bool, optional
            If resume is set to true, we will try to resume a previously
            interrupted run
        checkpoint_file: string, optional
            if not None The state of the sampler will be saved into this
            file every checkpoint_every seconds
        checkpoint_every: float, optional
            The number of seconds between checkpoints that will save
            the internal state of the sampler
        Returns
        -------
        (ncall, niter, logl_bounds, results) : tuple
            The updated number of likelihood calls, the updated number of
            iterations, the ln(likelihood) bounds actually used for the
            batch, and the final `IteratorResult` yielded while sampling
            the batch.
        """
        # Initialize values.
        maxcall = maxcall or sys.maxsize
        maxiter = maxiter or sys.maxsize
        wt_function = wt_function or weight_function
        wt_kwargs = wt_kwargs or {}
        stop_val = stop_val or np.nan
        res = self.results
        # Validate the mode/logl_bounds combination up front.
        if mode != 'manual' and logl_bounds is not None:
            raise RuntimeError(
                "specified logl_bounds are only allowed for manual mode")
        if mode == 'manual' and logl_bounds is None:
            raise RuntimeError(
                "logl_bounds need to be specified for manual mode")
        if mode == 'auto' or mode == 'weight':
            logl_bounds = wt_function(res, wt_kwargs)
        # this is just for printing
        if logl_bounds is None:
            logl_min, logl_max = -np.inf, np.inf
        else:
            logl_min, logl_max = logl_bounds
        # For printing as well, we just display old logz,logzerr here
        logz, logzvar = res['logz'][-1], res['logzerr'][-1]**2
        # If we have either likelihood calls or iterations remaining,
        # add our new batch of live points.
        ncall, niter, n = self.ncall, self.it - 1, self.batch
        if checkpoint_file is not None:
            # Only needed when checkpointing; all later uses are guarded by
            # the same `checkpoint_file is not None` condition.
            timer = DelayTimer(checkpoint_every)
        if maxcall > 0 and maxiter > 0:
            pbar, print_func = get_print_func(print_func, print_progress)
            try:
                results = None  # to silence pylint as
                # sample_batch() should return something given maxiter/maxcall
                for cur_results in self.sample_batch(nlive_new=nlive,
                                                     dlogz=dlogz,
                                                     logl_bounds=logl_bounds,
                                                     maxiter=maxiter,
                                                     maxcall=maxcall,
                                                     save_bounds=save_bounds,
                                                     resume=resume):
                    if resume:
                        # only one resume iteration, after that
                        # we switch to normal
                        resume = False
                    if cur_results.worst >= 0:
                        ncall += cur_results.nc
                        niter += 1
                    # Reorganize results.
                    results = IteratorResult(worst=cur_results.worst,
                                             ustar=cur_results.ustar,
                                             vstar=cur_results.vstar,
                                             loglstar=cur_results.loglstar,
                                             blob=cur_results.blob,
                                             logvol=np.nan,
                                             logwt=np.nan,
                                             logz=logz,
                                             logzvar=logzvar,
                                             h=np.nan,
                                             nc=cur_results.nc,
                                             worst_it=cur_results.worst_it,
                                             boundidx=cur_results.boundidx,
                                             bounditer=cur_results.bounditer,
                                             eff=cur_results.eff,
                                             delta_logz=np.nan)
                    # Print progress.
                    if print_progress:
                        print_func(results,
                                   niter,
                                   ncall,
                                   nbatch=n + 1,
                                   stop_val=stop_val,
                                   logl_min=logl_min,
                                   logl_max=logl_max)
                    if (checkpoint_file is not None and self.internal_state !=
                            DynamicSamplerStatesEnum.INBATCHADDLIVE
                            and timer.is_time()):
                        self.save(checkpoint_file)
            finally:
                if pbar is not None:
                    pbar.close()
                self.loglikelihood.history_save()
            # Combine batch with previous runs.
            self.combine_runs()
            # Pass back info.
            self.internal_state = DynamicSamplerStatesEnum.BATCH_DONE
            return ncall, niter, logl_bounds, results
        else:
            # FIX: the two adjacent string literals previously concatenated
            # to "...function callsor iterations" (missing separator space).
            raise RuntimeError(
                'add_batch called with no leftover function calls '
                'or iterations')
|
{
"content_hash": "b9a1cc0157231f0bcda1c27b61fff6d4",
"timestamp": "",
"source": "github",
"line_count": 2083,
"max_line_length": 79,
"avg_line_length": 41.689390302448395,
"alnum_prop": 0.5213095498566312,
"repo_name": "joshspeagle/dynesty",
"id": "57294af1860e861104e4e1431f7ae7357dc2efd3",
"size": "86885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/dynesty/dynamicsampler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "587395"
}
],
"symlink_target": ""
}
|
import logging
import traceback
import uuid
from enum import IntEnum, unique
from ._journal import send, syslog_priorities
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
_priorities = syslog_priorities()
__all__ = "write", "send", "Priority", "JournaldLogHandler", "Facility"
@unique
class Priority(IntEnum):
    """Syslog message priority levels.
    The numeric values are not hard-coded: they are resolved at import
    time from the mapping returned by ``syslog_priorities()`` in the
    ``_journal`` extension (see the module-level ``_priorities``).
    """
    PANIC = _priorities["panic"]
    WARNING = _priorities["warn"]
    ALERT = _priorities["alert"]
    NONE = _priorities["none"]
    CRITICAL = _priorities["crit"]
    DEBUG = _priorities["debug"]
    INFO = _priorities["info"]
    ERROR = _priorities["error"]
    NOTICE = _priorities["notice"]
@unique
class Facility(IntEnum):
    """Syslog facility codes identifying the kind of program logging.
    The numeric values follow the conventional syslog facility numbering
    (kern=0, user=1, ..., local0-local7=16-23).
    """
    KERN = 0
    USER = 1
    MAIL = 2
    DAEMON = 3
    AUTH = 4
    SYSLOG = 5
    LPR = 6
    NEWS = 7
    UUCP = 8
    CLOCK_DAEMON = 9
    AUTHPRIV = 10
    FTP = 11
    NTP = 12
    AUDIT = 13
    ALERT = 14
    CRON = 15
    LOCAL0 = 16
    LOCAL1 = 17
    LOCAL2 = 18
    LOCAL3 = 19
    LOCAL4 = 20
    LOCAL5 = 21
    LOCAL6 = 22
    LOCAL7 = 23
def write(message, priority=Priority.INFO):
    """Write *message* into the systemd journal at the given priority.
    :type message: str
    :type priority: Priority
    """
    # Normalise whatever was passed (plain int or Priority member) through
    # the enum, then back to a plain int for the low-level sender.
    level = int(Priority(int(priority)))
    send(message=message, priority=level)
class JournaldLogHandler(logging.Handler):
    """A :class:`logging.Handler` that forwards log records to the systemd
    journal via :func:`send`, translating ``LogRecord`` attributes into
    journald field names.
    """
    # Mapping of stdlib logging levels to syslog priority values.
    # FIX: ``logging.FATAL`` is an alias of ``logging.CRITICAL`` (both are
    # the integer 50), so the original dict listed the key 50 twice and the
    # later ``FATAL: PANIC`` entry silently overrode ``CRITICAL: CRITICAL``
    # -- CRITICAL records were emitted with "panic" (emerg) priority.
    # ``logging.WARN`` was likewise a harmless duplicate of WARNING.
    # Both alias keys are dropped so each level maps exactly once.
    LEVELS = {
        logging.CRITICAL: Priority.CRITICAL.value,
        logging.ERROR: Priority.ERROR.value,
        logging.WARNING: Priority.WARNING.value,
        logging.INFO: Priority.INFO.value,
        logging.DEBUG: Priority.DEBUG.value,
        logging.NOTSET: Priority.NONE.value,
    }
    # Both private attributes set in __init__ are declared; the original
    # listed only "__facility".  (Because logging.Handler instances still
    # carry a __dict__, __slots__ here is advisory rather than a memory
    # optimization.)
    __slots__ = ("__identifier", "__facility")
    def __init__(self, identifier=None, facility=Facility.DAEMON):
        """
        :param identifier: override the default journald identifier
            (when not given, the record's logger name is used)
        :type facility: Facility
        """
        logging.Handler.__init__(self)
        self.__identifier = identifier
        self.__facility = int(facility)
    @staticmethod
    def _to_microsecond(ts):
        """Convert a float UNIX timestamp in seconds to whole microseconds.
        :type ts: float
        """
        return int(ts * 1000 * 1000)
    def emit(self, record):
        """Send *record* to the journal.
        Builds a deterministic ``message_id`` from the record's content so
        identical records hash to the same UUID, then renames LogRecord
        attributes to journald-style fields and forwards everything to
        :func:`send`.
        """
        message = str(record.getMessage())
        tb_message = ""
        if record.exc_info:
            # Append the formatted traceback to the human-readable message.
            tb_message = "\n".join(
                traceback.format_exception(*record.exc_info)
            )
            message += "\n"
            message += tb_message
        ts = self._to_microsecond(record.created)
        # Fields folded into the stable message id (uuid3 of their join).
        hash_fields = (
            message,
            record.funcName,
            record.levelno,
            record.process,
            record.processName,
            record.levelname,
            record.pathname,
            record.name,
            record.thread,
            record.lineno,
            ts,
            tb_message,
        )
        message_id = uuid.uuid3(
            uuid.NAMESPACE_OID, "$".join(str(x) for x in hash_fields)
        ).hex
        # Start from the record's public, non-None attributes; the pops
        # below rename them to journald field names.
        data = {
            key: value
            for key, value in record.__dict__.items()
            if not key.startswith("_") and value is not None
        }
        data["message"] = self.format(record)
        # Custom numeric levels not present in LEVELS raise KeyError here;
        # logging routes emit() errors through Handler.handleError().
        data["priority"] = self.LEVELS[data.pop("levelno")]
        data["syslog_facility"] = self.__facility
        data["code_file"] = data.pop("filename")
        data["code_line"] = data.pop("lineno")
        data["code_func"] = data.pop("funcName")
        if self.__identifier:
            data["syslog_identifier"] = self.__identifier
        else:
            data["syslog_identifier"] = data["name"]
        if "msg" in data:
            # The raw (pre-%-formatting) message template.
            data["message_raw"] = data.pop("msg")
        data["message_id"] = message_id
        data["code_module"] = data.pop("module")
        data["logger_name"] = data.pop("name")
        data["pid"] = data.pop("process")
        # NOTE(review): "proccess_name" is misspelled, but it is the field
        # name actually emitted to the journal; renaming it would break
        # existing consumers, so it is deliberately kept as-is.
        data["proccess_name"] = data.pop("processName")
        data["errno"] = 0 if not record.exc_info else 255
        data["relative_ts"] = self._to_microsecond(data.pop("relativeCreated"))
        data["thread_name"] = data.pop("threadName")
        # Expand positional or mapping format args into individual fields.
        args = data.pop("args", [])
        if isinstance(args, Mapping):
            for key, value in args.items():
                data["argument_%s" % key] = value
        else:
            for idx, item in enumerate(args):
                data["argument_%d" % idx] = str(item)
        if tb_message:
            data["traceback"] = tb_message
        send(**data)
# Module-level handler instance shared by every JournaldLogger.
handler = JournaldLogHandler()
class JournaldLogger(logging.Logger):
    """A Logger pre-wired to emit through the module-level journald handler."""
    def __init__(self, level, name="root"):
        # Explicit base-class call, matching the style used by
        # JournaldLogHandler.__init__ in this module.
        logging.Logger.__init__(self, name, level)
        self.addHandler(handler)
# Ready-to-use logger instance at WARNING level.
Logger = JournaldLogger(logging.WARNING)
|
{
"content_hash": "573d5afc0146e30ee33520eb2dd71c80",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 79,
"avg_line_length": 25.110526315789475,
"alnum_prop": 0.5663382938587298,
"repo_name": "mosquito/python-systemd",
"id": "e58c80971770cb5633dd9cee2f5ddc68189c843e",
"size": "4771",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cysystemd/journal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30849"
}
],
"symlink_target": ""
}
|
from kolibri.auth.api import KolibriAuthPermissions, KolibriAuthPermissionsFilter
from kolibri.content.api import OptionalPageNumberPagination
from rest_framework import filters, viewsets
from .models import ContentRatingLog, ContentSessionLog, ContentSummaryLog, UserSessionLog
from .serializers import ContentRatingLogSerializer, ContentSessionLogSerializer, ContentSummaryLogSerializer, UserSessionLogSerializer
class ContentSessionLogFilter(filters.FilterSet):
    """Filter configuration allowing ContentSessionLog list requests to be
    narrowed by ``user_id`` and ``content_id`` query parameters."""
    class Meta:
        model = ContentSessionLog
        fields = ['user_id', 'content_id']
class ContentSessionLogViewSet(viewsets.ModelViewSet):
    """REST endpoint for ContentSessionLog records.
    Access is governed by KolibriAuthPermissions plus the matching filter
    backend; list results are filterable via ContentSessionLogFilter and
    optionally paginated."""
    permission_classes = (KolibriAuthPermissions,)
    filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)
    queryset = ContentSessionLog.objects.all()
    serializer_class = ContentSessionLogSerializer
    pagination_class = OptionalPageNumberPagination
    filter_class = ContentSessionLogFilter
class ContentSummaryFilter(filters.FilterSet):
    """Filter configuration allowing ContentSummaryLog list requests to be
    narrowed by ``user_id`` and ``content_id`` query parameters."""
    class Meta:
        model = ContentSummaryLog
        fields = ['user_id', 'content_id']
class ContentSummaryLogViewSet(viewsets.ModelViewSet):
    """REST endpoint for ContentSummaryLog records.
    Mirrors ContentSessionLogViewSet: auth-scoped queryset, filterable by
    user_id/content_id, optionally paginated."""
    permission_classes = (KolibriAuthPermissions,)
    filter_backends = (KolibriAuthPermissionsFilter, filters.DjangoFilterBackend)
    queryset = ContentSummaryLog.objects.all()
    serializer_class = ContentSummaryLogSerializer
    pagination_class = OptionalPageNumberPagination
    filter_class = ContentSummaryFilter
class ContentRatingLogViewSet(viewsets.ModelViewSet):
    """REST endpoint for ContentRatingLog records.
    Auth-scoped like the other log viewsets, but exposes no field filters."""
    permission_classes = (KolibriAuthPermissions,)
    filter_backends = (KolibriAuthPermissionsFilter,)
    queryset = ContentRatingLog.objects.all()
    serializer_class = ContentRatingLogSerializer
    pagination_class = OptionalPageNumberPagination
class UserSessionLogViewSet(viewsets.ModelViewSet):
    """REST endpoint for UserSessionLog records.
    Auth-scoped like the other log viewsets, but exposes no field filters."""
    permission_classes = (KolibriAuthPermissions,)
    filter_backends = (KolibriAuthPermissionsFilter,)
    queryset = UserSessionLog.objects.all()
    serializer_class = UserSessionLogSerializer
    pagination_class = OptionalPageNumberPagination
|
{
"content_hash": "670f1bb6b7fe22d202d18cf78a0c4b72",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 135,
"avg_line_length": 39.22641509433962,
"alnum_prop": 0.8013468013468014,
"repo_name": "ralphiee22/kolibri",
"id": "2fc568ae924af949e53303ddf2ff896263644f04",
"size": "2079",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "kolibri/logger/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5059"
},
{
"name": "HTML",
"bytes": "1703"
},
{
"name": "JavaScript",
"bytes": "174777"
},
{
"name": "Makefile",
"bytes": "1907"
},
{
"name": "Python",
"bytes": "414073"
},
{
"name": "Shell",
"bytes": "6693"
},
{
"name": "Vue",
"bytes": "110216"
}
],
"symlink_target": ""
}
|
"""The tests for the litejet component."""
import logging
import unittest
from homeassistant.components import litejet
from tests.common import get_test_home_assistant
_LOGGER = logging.getLogger(__name__)
class TestLiteJet(unittest.TestCase):
    """Test the litejet component."""
    def setup_method(self, method):
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        self.hass.start()
        self.hass.block_till_done()
    def teardown_method(self, method):
        """Stop everything that was started."""
        self.hass.stop()
    def test_is_ignored_unspecified(self):
        """Nothing is ignored when no exclude list is configured."""
        self.hass.data['litejet_config'] = {}
        assert not litejet.is_ignored(self.hass, 'Test')
    def test_is_ignored_empty(self):
        """An explicitly empty exclude list ignores nothing."""
        self.hass.data['litejet_config'] = {
            litejet.CONF_EXCLUDE_NAMES: []
        }
        assert not litejet.is_ignored(self.hass, 'Test')
    def test_is_ignored_normal(self):
        """Exclusion matching against a populated exclude list.
        The assertions expect matching to be case-sensitive, and the
        'Other One There' case implies a configured name also matches when
        it occurs at the start of a longer name (presumably prefix/substring
        matching inside litejet.is_ignored -- confirm against the component).
        """
        self.hass.data['litejet_config'] = {
            litejet.CONF_EXCLUDE_NAMES: ['Test', 'Other One']
        }
        assert litejet.is_ignored(self.hass, 'Test')
        # Case differences or altered characters must NOT match...
        assert not litejet.is_ignored(self.hass, 'Other one')
        assert not litejet.is_ignored(self.hass, 'Other 0ne')
        # ...but an exact configured name, even embedded in a longer name, does.
        assert litejet.is_ignored(self.hass, 'Other One There')
        assert litejet.is_ignored(self.hass, 'Other One')
|
{
"content_hash": "413cb3fa1ebfa274c2225d7301ed66b1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 63,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.6392857142857142,
"repo_name": "ma314smith/home-assistant",
"id": "6d62e1ab0cda60ac0d9ae32e355559f77bb1cd4a",
"size": "1400",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/test_litejet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1436909"
},
{
"name": "Python",
"bytes": "4511947"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "4460"
}
],
"symlink_target": ""
}
|
import inspect
import weakref
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Tuple
from typing import Union
import pandas as pd
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.dataframe import expressions
from apache_beam.dataframe import frame_base
from apache_beam.dataframe import schemas
from apache_beam.dataframe import transforms
if TYPE_CHECKING:
# pylint: disable=ungrouped-imports
from typing import Optional
# TODO: Or should this be called as_dataframe?
def to_dataframe(
    pcoll,  # type: pvalue.PCollection
    proxy=None,  # type: Optional[pd.core.generic.NDFrame]
    label=None,  # type: Optional[str]
):
  # type: (...) -> frame_base.DeferredFrame
  """Converts a PCollection to a deferred dataframe-like object, which can be
  manipulated with pandas methods like `filter` and `groupby`.

  For example, one might write::

    pcoll = ...
    df = to_dataframe(pcoll, proxy=...)
    result = df.groupby('col').sum()
    pcoll_result = to_pcollection(result)

  A proxy object must be given if the schema for the PCollection is not known.

  Args:
    pcoll: The PCollection to convert.
    proxy: A pandas object with the intended columns/dtypes but no data.
      When omitted, the PCollection must have a schema, from which a proxy
      is generated (and the elements are batched into DataFrames).
    label: Label for the batching transform; when omitted, a stable label
      is derived from the caller's variable name.

  Returns:
    A deferred frame wrapping the (batched) PCollection.
  """
  if proxy is None:
    if pcoll.element_type is None:
      raise ValueError(
          "Cannot infer a proxy because the input PCollection does not have a "
          "schema defined. Please make sure a schema type is specified for "
          "the input PCollection, or provide a proxy.")
    # If no proxy is given, assume this is an element-wise schema-aware
    # PCollection that needs to be batched.
    if label is None:
      # Attempt to come up with a reasonable, stable label by retrieving
      # the name of these variables in the calling context.
      # NOTE: _var_name inspects the call stack at a fixed depth (2), so
      # this call must stay at this exact nesting level.
      label = 'BatchElements(%s)' % _var_name(pcoll, 2)
    proxy = schemas.generate_proxy(pcoll.element_type)
    pcoll = pcoll | label >> schemas.BatchRowsAsDataFrame(proxy=proxy)
  return frame_base.DeferredFrame.wrap(
      expressions.PlaceholderExpression(proxy, pcoll))
# PCollections generated by to_pcollection are memoized, keyed by expression
# id. WeakValueDictionary is used so the caches are cleaned up with the parent
# pipelines.
# Note that the pipeline (indirectly) holds references to the transforms which
# keeps both the PCollections and expressions alive. This ensures the
# expression's ids are never accidentally re-used.
TO_PCOLLECTION_CACHE = weakref.WeakValueDictionary(
)  # type: weakref.WeakValueDictionary[str, pvalue.PCollection]
# Unbatched views of the above, keyed by a label string that encodes both the
# expression id and the unbatching parameters (see _make_unbatched_pcoll).
UNBATCHED_CACHE = weakref.WeakValueDictionary(
)  # type: weakref.WeakValueDictionary[str, pvalue.PCollection]
def _make_unbatched_pcoll(
    pc: pvalue.PCollection, expr: expressions.Expression,
    include_indexes: bool):
  """Return an element-wise (unbatched) view of the batched PCollection `pc`.

  Results are memoized in UNBATCHED_CACHE. The cache key is the label, which
  encodes both the expression id and the unbatching parameters
  (i.e. include_indexes), so the same expression unbatched with different
  parameters gets distinct entries.
  """
  label = f"Unbatch '{expr._id}'"
  if include_indexes:
    label += " with indexes"
  # FIX: UNBATCHED_CACHE is a WeakValueDictionary, so an entry observed by a
  # separate `in` test could be garbage-collected before the subsequent
  # lookup. Fetch once and hold a strong local reference instead.
  result = UNBATCHED_CACHE.get(label)
  if result is None:
    result = pc | label >> schemas.UnbatchPandas(
        expr.proxy(), include_indexes=include_indexes)
    UNBATCHED_CACHE[label] = result
  return result
# TODO: Or should this be called from_dataframe?
def to_pcollection(
    *dataframes,  # type: Union[frame_base.DeferredFrame, pd.DataFrame, pd.Series]
    label=None,
    always_return_tuple=False,
    yield_elements='schemas',
    include_indexes=False,
    pipeline=None) -> Union[pvalue.PCollection, Tuple[pvalue.PCollection, ...]]:
  """Converts one or more deferred dataframe-like objects back to a PCollection.

  This method creates and applies the actual Beam operations that compute
  the given deferred dataframes, returning a PCollection of their results. By
  default the resulting PCollections are schema-aware PCollections where each
  element is one row from the output dataframes, excluding indexes. This
  behavior can be modified with the `yield_elements` and `include_indexes`
  arguments.

  Also accepts non-deferred pandas dataframes, which are converted to deferred,
  schema'd PCollections. In this case the contents of the entire dataframe are
  serialized into the graph, so for large amounts of data it is preferable to
  write them to disk and read them with one of the read methods.

  If more than one (related) result is desired, it can be more efficient to
  pass them all at the same time to this method.

  Args:
    label: (optional, default "ToPCollection(...)"") the label to use for the
        conversion transform.
    always_return_tuple: (optional, default: False) If true, always return
        a tuple of PCollections, even if there's only one output.
    yield_elements: (optional, default: "schemas") If set to "pandas", return
        PCollections containing the raw Pandas objects (DataFrames or Series),
        if set to "schemas", return an element-wise PCollection, where DataFrame
        and Series instances are expanded to one element per row. DataFrames are
        converted to schema-aware PCollections, where column values can be
        accessed by attribute.
    include_indexes: (optional, default: False) When yield_elements="schemas",
        if include_indexes=True, attempt to include index columns in the output
        schema for expanded DataFrames. Raises an error if any of the index
        levels are unnamed (name=None), or if any of the names are not unique
        among all column and index names.
    pipeline: (optional, unless non-deferred dataframes are passed) Used when
        creating a PCollection from a non-deferred dataframe.

  Returns:
    A single PCollection, or a tuple of PCollections (one per input, in
    input order) when several dataframes are passed or
    always_return_tuple=True.
  """
  if yield_elements not in ("pandas", "schemas"):
    raise ValueError(
        "Invalid value for yield_elements argument, '%s'. "
        "Allowed values are 'pandas' and 'schemas'" % yield_elements)
  if label is None:
    # Attempt to come up with a reasonable, stable label by retrieving the name
    # of these variables in the calling context.
    # NOTE: _var_name inspects the stack at a fixed depth (3); keep this call
    # at this nesting level.
    label = 'ToPCollection(%s)' % ', '.join(_var_name(e, 3) for e in dataframes)
  # Support for non-deferred dataframes: wrap each raw pandas object in a
  # Create + placeholder expression so it participates like a deferred one.
  deferred_dataframes = []
  for ix, df in enumerate(dataframes):
    if isinstance(df, frame_base.DeferredBase):
      # TODO(robertwb): Maybe extract pipeline object?
      deferred_dataframes.append(df)
    elif isinstance(df, (pd.Series, pd.DataFrame)):
      if pipeline is None:
        raise ValueError(
            'Pipeline keyword required for non-deferred dataframe conversion.')
      deferred = pipeline | '%s_Defer%s' % (label, ix) >> beam.Create([df])
      deferred_dataframes.append(
          frame_base.DeferredFrame.wrap(
              expressions.PlaceholderExpression(df.iloc[:0], deferred)))
    else:
      raise TypeError(
          'Unable to convert objects of type %s to a PCollection' % type(df))
  dataframes = tuple(deferred_dataframes)
  def extract_input(placeholder):
    if not isinstance(placeholder._reference, pvalue.PCollection):
      raise TypeError(
          'Expression roots must have been created with to_dataframe.')
    return placeholder._reference
  placeholders = frozenset.union(
      frozenset(), *[df._expr.placeholders() for df in dataframes])
  # Exclude any dataframes that have already been converted to PCollections.
  # We only want to convert each DF expression once, then re-use.
  # NOTE(review): TO_PCOLLECTION_CACHE is a WeakValueDictionary, so an entry
  # could in principle disappear between this membership test and the lookup
  # in raw_results below; the pipeline normally keeps the values alive (see
  # the cache's comment) -- confirm before relying on it elsewhere.
  new_dataframes = [
      df for df in dataframes if df._expr._id not in TO_PCOLLECTION_CACHE
  ]
  if new_dataframes:
    new_results = {p: extract_input(p)
                   for p in placeholders
                   } | label >> transforms._DataframeExpressionsTransform({
                       ix: df._expr
                       for (ix, df) in enumerate(new_dataframes)
                   })  # type: Dict[Any, pvalue.PCollection]
    TO_PCOLLECTION_CACHE.update(
        {new_dataframes[ix]._expr._id: pc
         for ix, pc in new_results.items()})
  raw_results = {
      ix: TO_PCOLLECTION_CACHE[df._expr._id]
      for ix,
      df in enumerate(dataframes)
  }
  if yield_elements == "schemas":
    def maybe_unbatch(pc, value):
      # Scalars have no rows to expand; everything else is unbatched.
      if isinstance(value, frame_base._DeferredScalar):
        return pc
      else:
        return _make_unbatched_pcoll(pc, value._expr, include_indexes)
    results = {
        ix: maybe_unbatch(pc, dataframes[ix])
        for (ix, pc) in raw_results.items()
    }
  else:
    results = raw_results
  if len(results) == 1 and not always_return_tuple:
    return results[0]
  else:
    return tuple(value for key, value in sorted(results.items()))
def _var_name(obj, level):
frame = inspect.currentframe()
for _ in range(level):
if frame is None:
return '...'
frame = frame.f_back
for key, value in frame.f_locals.items():
if obj is value:
return key
for key, value in frame.f_globals.items():
if obj is value:
return key
return '...'
|
{
"content_hash": "e59a481ef90262b55c65829d234b06af",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 82,
"avg_line_length": 38.526315789473685,
"alnum_prop": 0.6982012750455373,
"repo_name": "robertwb/incubator-beam",
"id": "2b207dda7270df7f36a5724a36a0dbdd97fd6a26",
"size": "9567",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/dataframe/convert.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "C",
"bytes": "3869"
},
{
"name": "CSS",
"bytes": "4957"
},
{
"name": "Cython",
"bytes": "59582"
},
{
"name": "Dart",
"bytes": "541526"
},
{
"name": "Dockerfile",
"bytes": "48191"
},
{
"name": "FreeMarker",
"bytes": "7933"
},
{
"name": "Go",
"bytes": "4688736"
},
{
"name": "Groovy",
"bytes": "888171"
},
{
"name": "HCL",
"bytes": "101646"
},
{
"name": "HTML",
"bytes": "164685"
},
{
"name": "Java",
"bytes": "38649211"
},
{
"name": "JavaScript",
"bytes": "105966"
},
{
"name": "Jupyter Notebook",
"bytes": "55818"
},
{
"name": "Kotlin",
"bytes": "209531"
},
{
"name": "Lua",
"bytes": "3620"
},
{
"name": "Python",
"bytes": "9785295"
},
{
"name": "SCSS",
"bytes": "312814"
},
{
"name": "Sass",
"bytes": "19336"
},
{
"name": "Scala",
"bytes": "1429"
},
{
"name": "Shell",
"bytes": "336583"
},
{
"name": "Smarty",
"bytes": "2618"
},
{
"name": "Thrift",
"bytes": "3260"
},
{
"name": "TypeScript",
"bytes": "181369"
}
],
"symlink_target": ""
}
|
"""SCons.Platform
SCons platform selection.
This looks for modules that define a callable object that can modify a
construction environment as appropriate for a given platform.
Note that we take a more simplistic view of "platform" than Python does.
We're looking for a single string that determines a set of
tool-independent variables with which to initialize a construction
environment. Consequently, we'll examine both sys.platform and os.name
(and anything else that might come into play) in order to return some
specification which is unique enough for our purposes.
Note that because this subsystem just *selects* a callable that can
modify a construction environment, it's possible for people to define
their own "platform specification" in an arbitrary callable function.
No one needs to use or tie in to this subsystem in order to roll
their own platform definition.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/__init__.py 2014/07/05 09:42:21 garyo"
import SCons.compat
import imp
import os
import sys
import tempfile
import SCons.Errors
import SCons.Subst
import SCons.Tool
def platform_default():
    """Return the platform string for our execution environment.

    The returned value should map to one of the SCons/Platform/*.py
    files.  Since we're architecture independent, though, we don't
    care about the machine architecture.

    Returns:
        str: a platform key such as 'posix', 'cygwin', 'darwin',
        'irix', 'sunos', 'hpux', 'aix', 'os2', or, as a fallback,
        the raw value of sys.platform (e.g. 'win32').
    """
    osname = os.name
    if osname == 'java':
        # Jython reports os.name as 'java'; the underlying OS type
        # is stashed in os._osType.
        osname = os._osType
    if osname == 'posix':
        if sys.platform == 'cygwin':
            return 'cygwin'
        # Use substring membership instead of .find() != -1: same
        # semantics, clearer intent.
        elif 'irix' in sys.platform:
            return 'irix'
        elif 'sunos' in sys.platform:
            return 'sunos'
        elif 'hp-ux' in sys.platform:
            return 'hpux'
        elif 'aix' in sys.platform:
            return 'aix'
        elif 'darwin' in sys.platform:
            return 'darwin'
        else:
            return 'posix'
    elif os.name == 'os2':
        return 'os2'
    else:
        return sys.platform
def platform_module(name = platform_default()):
    """Return the imported module for the platform.

    This looks for a module name that matches the specified argument.
    If the name is unspecified, we fetch the appropriate default for
    our execution environment.

    NOTE(review): the default argument is evaluated once at definition
    time; the execution platform does not change during a run, so this
    appears intentional.
    """
    full_name = 'SCons.Platform.' + name
    if full_name not in sys.modules:
        if os.name == 'java':
            # Jython lacks the imp machinery used below; evaluating the
            # dotted name triggers Jython's own import mechanism.
            eval(full_name)
        else:
            try:
                # Look the module up inside the SCons.Platform package
                # directory and load it under its fully-qualified name.
                file, path, desc = imp.find_module(name,
                    sys.modules['SCons.Platform'].__path__)
                try:
                    mod = imp.load_module(full_name, file, path, desc)
                finally:
                    # imp.find_module returns an open file object; close
                    # it whether or not the load succeeded.
                    if file:
                        file.close()
            except ImportError:
                try:
                    # Fall back to loading from a zip archive (e.g. when
                    # SCons runs from a zipped distribution).
                    import zipimport
                    importer = zipimport.zipimporter( sys.modules['SCons.Platform'].__path__[0] )
                    mod = importer.load_module(full_name)
                except ImportError:
                    raise SCons.Errors.UserError("No platform named '%s'" % name)
            # Expose the module as an attribute of the package so later
            # attribute access (SCons.Platform.<name>) works.
            setattr(SCons.Platform, name, mod)
    return sys.modules[full_name]
def DefaultToolList(platform, env):
    """Select a default tool list for the specified platform.

    Thin wrapper that delegates to SCons.Tool.tool_list(), which knows
    the canonical tool set for each platform key.
    """
    tools = SCons.Tool.tool_list(platform, env)
    return tools
class PlatformSpec(object):
    """Pair a platform name with its ``generate`` callable.

    Calling the spec forwards all arguments to the wrapped callable;
    ``str()`` of the spec yields the platform name.
    """

    def __init__(self, name, generate):
        self.name = name
        self.generate = generate

    def __call__(self, *args, **kwargs):
        # Forward everything to the wrapped generate() function.
        return self.generate(*args, **kwargs)

    def __str__(self):
        return self.name
class TempFileMunge(object):
    """A callable class. You can set an Environment variable to this,
    then call it with a string argument, then it will perform temporary
    file substitution on it. This is used to circumvent the long command
    line limitation.

    Example usage:
        env["TEMPFILE"] = TempFileMunge
        env["LINKCOM"] = "${TEMPFILE('$LINK $TARGET $SOURCES')}"

    By default, the name of the temporary file used begins with a
    prefix of '@'. This may be configured for other tool chains by
    setting '$TEMPFILEPREFIX':
        env["TEMPFILEPREFIX"] = '-@'   # diab compiler
        env["TEMPFILEPREFIX"] = '-via' # arm tool chain
    """
    def __init__(self, cmd):
        # cmd: the command-line template string to expand later, e.g.
        # '$LINK $TARGET $SOURCES'.
        self.cmd = cmd

    def __call__(self, target, source, env, for_signature):
        if for_signature:
            # If we're being called for signature calculation, it's
            # because we're being called by the string expansion in
            # Subst.py, which has the logic to strip any $( $) that
            # may be in the command line we squirreled away. So we
            # just return the raw command line and let the upper
            # string substitution layers do their thing.
            return self.cmd
        # Now we're actually being called because someone is actually
        # going to try to execute the command, so we have to do our
        # own expansion.
        cmd = env.subst_list(self.cmd, SCons.Subst.SUBST_CMD, target, source)[0]
        try:
            maxline = int(env.subst('$MAXLINELENGTH'))
        except ValueError:
            # $MAXLINELENGTH unset or non-numeric: fall back to a
            # conservative default command-line limit.
            maxline = 2048
        length = 0
        for c in cmd:
            length += len(c)
        if length <= maxline:
            # Short enough to execute directly; no temp file needed.
            return self.cmd
        # We do a normpath because mktemp() has what appears to be
        # a bug in Windows that will use a forward slash as a path
        # delimiter. Windows's link mistakes that for a command line
        # switch and barfs.
        #
        # We use the .lnk suffix for the benefit of the Phar Lap
        # linkloc linker, which likes to append an .lnk suffix if
        # none is given.
        (fd, tmp) = tempfile.mkstemp('.lnk', text=True)
        # NOTE(review): SCons.Util and SCons.Action (below) are not
        # imported at the top of this file; presumably another SCons
        # module has populated the package namespace -- confirm.
        native_tmp = SCons.Util.get_native_path(os.path.normpath(tmp))
        if env['SHELL'] and env['SHELL'] == 'sh':
            # The sh shell will try to escape the backslashes in the
            # path, so unescape them.
            native_tmp = native_tmp.replace('\\', r'\\\\')
            # In Cygwin, we want to use rm to delete the temporary
            # file, because del does not exist in the sh shell.
            rm = env.Detect('rm') or 'del'
        else:
            # Don't use 'rm' if the shell is not sh, because rm won't
            # work with the Windows shells (cmd.exe or command.com) or
            # Windows path names.
            rm = 'del'
        prefix = env.subst('$TEMPFILEPREFIX')
        if not prefix:
            prefix = '@'
        args = list(map(SCons.Subst.quote_spaces, cmd[1:]))
        # Write all arguments (everything after the command itself)
        # into the temp file, one line.
        os.write(fd, " ".join(args) + "\n")
        os.close(fd)
        # XXX Using the SCons.Action.print_actions value directly
        # like this is bogus, but expedient. This class should
        # really be rewritten as an Action that defines the
        # __call__() and strfunction() methods and lets the
        # normal action-execution logic handle whether or not to
        # print/execute the action. The problem, though, is all
        # of that is decided before we execute this method as
        # part of expanding the $TEMPFILE construction variable.
        # Consequently, refactoring this will have to wait until
        # we get more flexible with allowing Actions to exist
        # independently and get strung together arbitrarily like
        # Ant tasks. In the meantime, it's going to be more
        # user-friendly to not let obsession with architectural
        # purity get in the way of just being helpful, so we'll
        # reach into SCons.Action directly.
        if SCons.Action.print_actions:
            print("Using tempfile "+native_tmp+" for command line:\n"+
                str(cmd[0]) + " " + " ".join(args))
        # Return [command, @tmpfile-arg + newline + delete-command,
        # tmpfile] so the tool runs with the response file and the
        # file is removed afterwards.
        return [ cmd[0], prefix + native_tmp + '\n' + rm, native_tmp ]
def Platform(name = platform_default()):
    """Select a canned Platform specification.

    Looks up the platform module for ``name`` and wraps its
    ``generate`` entry point in a PlatformSpec.
    """
    module = platform_module(name)
    return PlatformSpec(name, module.generate)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "c87958830a6a44b0141ccf08cece9dea",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 97,
"avg_line_length": 38.697095435684645,
"alnum_prop": 0.6316748874115377,
"repo_name": "dezelin/scons",
"id": "f8d804128d0ffd82672e53a9e4643b57815ec05b",
"size": "9326",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scons-local/SCons/Platform/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1914323"
}
],
"symlink_target": ""
}
|
from flask import Blueprint, render_template
# Blueprint for the main site pages; templates live under the
# transporter package.
main_module = Blueprint(
    'main', __name__, template_folder='transporter/templates')


@main_module.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
|
{
"content_hash": "3c0237e0e4d2a3afd3712d039aecaed6",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 62,
"avg_line_length": 22.09090909090909,
"alnum_prop": 0.6790123456790124,
"repo_name": "suminb/transporter",
"id": "74dbe5a4837a6c1e458b2edd5023dc9870250851",
"size": "243",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "transporter/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3900"
},
{
"name": "Python",
"bytes": "32424"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add five demo CharField columns (demo_1 .. demo_5) to Question."""

    dependencies = [
        ('polls', '0001_initial'),
    ]

    # The five added columns differ only by their index, so generate
    # the AddField operations in a comprehension instead of spelling
    # each one out.
    operations = [
        migrations.AddField(
            model_name='question',
            name='demo_%d' % index,
            field=models.CharField(default='demo val %d' % index, max_length=200),
        )
        for index in range(1, 6)
    ]
|
{
"content_hash": "09b482e34974a657edbaf57cb934a37a",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 73,
"avg_line_length": 28.736842105263158,
"alnum_prop": 0.5412087912087912,
"repo_name": "vithd/vithd.github.io",
"id": "59cf6d6a73a561bf618eaf8ab483436058ee052b",
"size": "1165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/mysite/polls/migrations/0002_auto_20170811_0436.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10596"
},
{
"name": "HTML",
"bytes": "22909"
},
{
"name": "JavaScript",
"bytes": "329912"
},
{
"name": "Python",
"bytes": "21417"
}
],
"symlink_target": ""
}
|
from setuptools import __version__
from setuptools import setup

# Refuse to build with a setuptools older than the features the
# project configuration relies on.
major_version = int(__version__.partition(".")[0])
if major_version < 47:
    raise RuntimeError(f"Setuptools >= 47 required. Found {__version__}")

setup()
|
{
"content_hash": "471c8837db41361030443c1cfef0303c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 73,
"avg_line_length": 28.142857142857142,
"alnum_prop": 0.6802030456852792,
"repo_name": "sqlalchemy/alembic",
"id": "1a4f69a8c0cc456cfe831517adad46568b4cbeb1",
"size": "197",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "11708"
},
{
"name": "Python",
"bytes": "1588332"
}
],
"symlink_target": ""
}
|
'''
iOS Compass
-----------
'''
from plyer.facades import Compass
from pyobjus import autoclass
class IosCompass(Compass):
    """iOS provider for the plyer Compass facade.

    Talks to CoreMotion through the pyobjus 'bridge' Objective-C
    helper object.
    """

    def __init__(self):
        super(IosCompass, self).__init__()
        self.bridge = autoclass('bridge').alloc().init()
        # Poll both the magnetometer and device motion at 10 Hz.
        self.bridge.motionManager.setMagnetometerUpdateInterval_(0.1)
        self.bridge.motionManager.setDeviceMotionUpdateInterval_(0.1)

    def _enable(self):
        self.bridge.startMagnetometer()
        self.bridge.startDeviceMotionWithReferenceFrame()

    def _disable(self):
        self.bridge.stopMagnetometer()
        self.bridge.stopDeviceMotion()

    def _get_orientation(self):
        # mf_* presumably holds the motion-filtered field components
        # -- TODO confirm against the bridge implementation.
        bridge = self.bridge
        return (bridge.mf_x, bridge.mf_y, bridge.mf_z)

    def _get_field_uncalib(self):
        # Raw magnetometer readings (mg_*) followed by the
        # raw-minus-filtered deltas.
        bridge = self.bridge
        return (
            bridge.mg_x,
            bridge.mg_y,
            bridge.mg_z,
            bridge.mg_x - bridge.mf_x,
            bridge.mg_y - bridge.mf_y,
            bridge.mg_z - bridge.mf_z,
        )
def instance():
    """Return a fresh IosCompass (plyer provider entry point)."""
    return IosCompass()
|
{
"content_hash": "b734ac72cf65d6ec3f5f4f38fbaca13a",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 69,
"avg_line_length": 25.27906976744186,
"alnum_prop": 0.5998160073597056,
"repo_name": "KeyWeeUsr/plyer",
"id": "a710c866d3c8bedd56423bbd5369d0fce67842ff",
"size": "1087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plyer/platforms/ios/compass.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1332"
},
{
"name": "HTML",
"bytes": "19384"
},
{
"name": "Makefile",
"bytes": "868"
},
{
"name": "Python",
"bytes": "422945"
},
{
"name": "Shell",
"bytes": "5724"
}
],
"symlink_target": ""
}
|
"""project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework_jwt.views import refresh_jwt_token
from open_discussions.views import index, saml_metadata, channel_redirect, channel_post
# Post slugs can contain unicode characters, so a letter-matching pattern like [A-Za-z] doesn't work.
# "[^\W]" Matches any character that is NOT a non-alphanumeric character, including underscores.
# "[^\W]" will match all numbers, underscores, and letters, unicode or otherwise. To accept dashes
# as well, that character is added to the pattern via an alternation (|).
POST_SLUG_PATTERN = "([^\\W]|-)+"

# Dotted paths to custom error-handler views (resolved by Django).
handler400 = "open_discussions.views.handle_400"
handler403 = "open_discussions.views.handle_403"
handler404 = "open_discussions.views.handle_404"

urlpatterns = [
    url(r"^admin/", admin.site.urls),
    url(r"^status/", include("server_status.urls")),
    # App URLconfs mounted at the root; each app namespaces its own paths.
    url(r"", include("authentication.urls")),
    url(r"", include("social_django.urls", namespace="social")),
    url(r"", include("channels.urls")),
    url(r"", include("channels_fields.urls")),
    url(r"", include("infinite_example.urls"), name="infinite_example"),
    url(r"", include("profiles.urls")),
    url(r"", include("mail.urls")),
    url(r"", include("notifications.urls")),
    url(r"", include("embedly.urls")),
    url(r"", include("search.urls")),
    url(r"", include("ckeditor.urls")),
    url(r"", include("widgets.urls")),
    url(r"", include("course_catalog.urls")),
    url(r"", include("livestream.urls")),
    url(r"", include("interactions.urls")),
    url(r"^api/token/refresh/", refresh_jwt_token),
    # React App: these routes all serve the SPA entry point; routing
    # beyond this is handled client-side.
    url(r"^$", index, name="open_discussions-index"),
    url(r"^auth_required/$", index),
    url(r"^content_policy/$", index),
    url(
        r"^c/(?P<channel_name>[A-Za-z0-9_]+)/(?P<post_id>[A-Za-z0-9_]+)/"
        r"(?P<post_slug>{post_slug_pattern})/comment/(?P<comment_id>[A-Za-z0-9_]+)/?$".format(
            post_slug_pattern=POST_SLUG_PATTERN
        ),
        channel_post,
        name="channel-post-comment",
    ),
    url(
        r"^c/(?P<channel_name>[A-Za-z0-9_]+)/(?P<post_id>[A-Za-z0-9_]+)/(?P<post_slug>{post_slug_pattern})/?$".format(
            post_slug_pattern=POST_SLUG_PATTERN
        ),
        channel_post,
        name="channel-post",
    ),
    url(r"^c/(?P<channel_name>[A-Za-z0-9_]+)/$", index, name="channel"),
    url(
        r"^manage/c/edit/(?P<channel_name>[A-Za-z0-9_]+)/basic/$",
        index,
        name="manage-channel",
    ),
    url(r"^settings/(?P<token>[^/]+)/$", index, name="settings-anon"),
    url(r"^c/", index),
    url(r"^channel/", channel_redirect),
    url(r"^manage/", index),
    url(r"^create_post/", index),
    url(r"^settings/", index),
    url(r"^saml/metadata/", saml_metadata, name="saml-metadata"),
    url(r"^profile/(?P<username>[A-Za-z0-9_]+)/", index, name="profile"),
    url(r"^login/", index, name="login"),
    url(r"^signup/", index, name="signup"),
    url(r"^signup/confirm/$", index, name="register-confirm"),
    url(r"^account/inactive/$", index, name="account-inactive"),
    url(r"^password_reset/", index, name="password-reset"),
    url(
        r"^password_reset/confirm/(?P<uid>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$",
        index,
        name="password-reset-confirm",
    ),
    url(r"^privacy-statement/", index, name="privacy-statement"),
    url(r"^search/", index, name="site-search"),
    url(r"^courses/", index, name="courses"),
    url(r"^learn/", index, name="learn"),
    url(r"^fields/", index, name="fields"),
    url(r"^podcasts/", index, name="podcasts"),
    url(r"^terms-and-conditions/", index, name="terms-and-conditions"),
    # Hijack
    url(r"^hijack/", include("hijack.urls", namespace="hijack")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # Only load the debug toolbar in development.
    import debug_toolbar  # pylint: disable=wrong-import-position, wrong-import-order

    urlpatterns += [url(r"^__debug__/", include(debug_toolbar.urls))]
|
{
"content_hash": "65b46f62a9b7d241c6e95e897b5df887",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 118,
"avg_line_length": 42.85454545454545,
"alnum_prop": 0.6304624522698346,
"repo_name": "mitodl/open-discussions",
"id": "f74260293aa5559f90e79ab0f030fcacd1ae4a29",
"size": "4714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open_discussions/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1040"
},
{
"name": "HTML",
"bytes": "78316"
},
{
"name": "JavaScript",
"bytes": "1704037"
},
{
"name": "Procfile",
"bytes": "675"
},
{
"name": "Python",
"bytes": "2264549"
},
{
"name": "SCSS",
"bytes": "133442"
},
{
"name": "Shell",
"bytes": "11787"
},
{
"name": "TypeScript",
"bytes": "307134"
}
],
"symlink_target": ""
}
|
import os
import ast
import sys
import fnmatch # Py 2
from setuptools import setup
def _glob_glob_recursive(directory, pattern):
# python 2 glob.glob doesn't have a recursive keyword
# this implements for the specific case that we want an exact match
# See also https://stackoverflow.com/a/2186565
matches = []
for root, dirname, filenames in os.walk(directory):
matches.extend([os.path.join(root, filename)
for filename in fnmatch.filter(filenames, pattern)])
return matches
class VersionPyFinder(object):
    """Locate the project's version.py and extract its helper functions.

    A candidate file is "eligible" when it defines, at module level,
    all of the functions named in _VERSION_PY_FUNCTIONS.  The chosen
    file can be overridden with the AUTORELEASE_VERSION_PY environment
    variable.
    """
    # Functions a version.py must define at module level to qualify.
    _VERSION_PY_FUNCTIONS = ['get_git_version', 'get_setup_cfg']

    def __init__(self, filename='version.py', max_depth=2):
        # filename: basename to search for; max_depth: how many
        # directory levels below the cwd to consider.
        self.filename_base = filename
        self.max_depth = max_depth
        self.depth = None
        # Environment override wins; otherwise take the first eligible
        # candidate found by walking the tree.
        self.filename = os.getenv("AUTORELEASE_VERSION_PY",
                                  self._first_eligible())
        self.functions = self._get_functions(self.filename)

    def _find_files(self):
        """Return candidate files within max_depth of the cwd."""
        # all_files = glob.glob("**/" + self.filename_base, recursive=True)
        all_files = _glob_glob_recursive('.', self.filename_base)
        meets_depth = [fname for fname in all_files
                       if len(fname.split(os.sep)) <= self.max_depth + 1]
        return meets_depth

    def _is_eligible(self, filename):
        """True if *filename* defines all required module-level functions."""
        with open(filename, mode='r') as f:
            contents = f.read()
        tree = ast.parse(contents)
        # we require that our functions be defined at module level -- we
        # know that's how we wrote them, at least!
        all_functions = [node.name for node in tree.body
                         if isinstance(node, ast.FunctionDef)]
        return all(func in all_functions
                   for func in self._VERSION_PY_FUNCTIONS)

    def _first_eligible(self):
        """Return the first eligible candidate file, or None."""
        all_files = self._find_files()
        for fname in all_files:
            if self._is_eligible(fname):
                return fname
        return None

    @property
    def version_setup_depth(self):
        """Directory-depth difference between version.py and setup.py."""
        def get_depth(fname):
            return len(os.path.abspath(fname).split(os.sep))
        # we assume that setup.py is in the same dir as setup.cfg
        diff = get_depth(self.filename) - get_depth(__file__)
        return diff

    def _get_functions(self, filename):
        """Execute the found version.py (with its _installed_version
        import forced to fail) and return its helper functions.

        NOTE(review): this reads self.filename, not the *filename*
        parameter -- confirm whether that is intended.
        """
        with open(self.filename, mode='r') as f:
            contents = f.read()
        tree = ast.parse(contents)

        class MakeImportError(ast.NodeTransformer):
            """converts a from x import y into an import error"""
            def __init__(self, import_name):
                self.import_name = import_name

            def visit_ImportFrom(self, node):
                if node.module == self.import_name:
                    # Replace the import statement with an explicit
                    # raise so version.py takes its fallback path.
                    replacement = ast.parse("raise ImportError()").body[0]
                    return ast.copy_location(replacement, node)
                else:
                    return node

        import_remover = MakeImportError("_installed_version")
        tree = import_remover.visit(tree)
        ast.fix_missing_locations(tree)
        # Execute the transformed module in a copy of our globals and
        # pull the helper functions out of the resulting namespace.
        locs = dict(globals())
        exec(compile(tree, filename="version.py", mode='exec'), locs)
        return {f: locs[f] for f in self._VERSION_PY_FUNCTIONS}
def write_installed_version_py(filename="_installed_version.py",
                               src_dir=None):
    """Write version, git hash, and depth constants into the package's
    _installed_version.py so an installed copy knows its own version.

    filename: basename of the file to write inside src_dir.
    src_dir: package directory; defaults to the package name from
    setup.cfg's [metadata] section.
    """
    version_finder = VersionPyFinder()
    directory = os.path.dirname(version_finder.filename)
    depth = version_finder.version_setup_depth
    get_git_version = version_finder.functions['get_git_version']
    get_setup_cfg = version_finder.functions['get_setup_cfg']
    # NOTE(review): installed_version is computed but never used; the
    # output path below is built from src_dir + filename instead.
    installed_version = os.path.join(directory, "_installed_version.py")
    content = "_installed_version = '{vers}'\n"
    content += "_installed_git_hash = '{git}'\n"
    content += "_version_setup_depth = {depth}\n"
    # question: if I use the __file__ attribute in something I compile from
    # here, what is the file?
    my_dir = os.path.abspath(os.path.dirname(__file__))
    conf = get_setup_cfg(directory=my_dir, filename='setup.cfg')
    # conf = get_setup_cfg(directory=my_dir, filename='new_setup.cfg')
    version = conf.get('metadata', 'version')
    git_rev = get_git_version()
    # TODO: shouldn't we just use the directory found by the
    # VersionPyFinder?
    if src_dir is None:
        src_dir = conf.get('metadata', 'name')
    with open(os.path.join(src_dir, filename), 'w') as f:
        f.write(content.format(vers=version, git=git_rev, depth=depth))
# Script entry point: record the installed version info, then hand
# off to setuptools.
if __name__ == "__main__":
    # TODO: only write version.py under special circumstances
    write_installed_version_py()
    # write_version_py(os.path.join('autorelease', 'version.py'))
    setup()
|
{
"content_hash": "b0f3db20aba37436ef8bec58d75c74a0",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 76,
"avg_line_length": 37.44444444444444,
"alnum_prop": 0.6102161933022467,
"repo_name": "openpathsampling/openpathsampling",
"id": "cf559c88a43da16d9fb68bd3309659c3459c0a7c",
"size": "4759",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1167"
},
{
"name": "CSS",
"bytes": "2687"
},
{
"name": "HTML",
"bytes": "81"
},
{
"name": "Makefile",
"bytes": "64"
},
{
"name": "Python",
"bytes": "2693413"
},
{
"name": "Shell",
"bytes": "8655"
}
],
"symlink_target": ""
}
|
from radish.stepregistry import step
from radish import when
@step("I have the number {number:g}")
def have_number(step, number):
    """Record *number* on the scenario context for later summation."""
    step.context.numbers.append(number)
@when("I sum them")
def sum_numbers(step):
    """Store the sum of every collected number on the context."""
    step.context.result = sum(step.context.numbers)
|
{
"content_hash": "4b82368867a84f5aa16df0776bcaf4f3",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 51,
"avg_line_length": 22.416666666666668,
"alnum_prop": 0.7323420074349443,
"repo_name": "SamuelYvon/radish",
"id": "1854f6293953ba9a4855497a8a23cebe6676a2f4",
"size": "294",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/functional/multiple_basedirs/radish1/steps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "20376"
},
{
"name": "Python",
"bytes": "261585"
},
{
"name": "Shell",
"bytes": "1686"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Generated benchmark case: 128 daily points, linear trend, no cycle,
# RelativeDifference transform, noise-free (sigma=0), 100 exogenous
# variables, AR order 12.
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 0, transform = "RelativeDifference", sigma = 0.0, exog_count = 100, ar_order = 12);
|
{
"content_hash": "c7c369a292a7b99f7e3c55ef4706e05d",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 177,
"avg_line_length": 39.57142857142857,
"alnum_prop": 0.7184115523465704,
"repo_name": "antoinecarme/pyaf",
"id": "df922dc4f82bdf099a59bd9b601c1641117c24fb",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_RelativeDifference/trend_LinearTrend/cycle_0/ar_12/test_artificial_128_RelativeDifference_LinearTrend_0_12_100.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from test_rest.models import Plant, Animal

# Expose both demo models in the Django admin site.
for model in (Plant, Animal):
    admin.site.register(model)
|
{
"content_hash": "db8425808b0a497d8d9f5d5e8c45aee9",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 42,
"avg_line_length": 23,
"alnum_prop": 0.8074534161490683,
"repo_name": "sorenh/angular-django-rest-resource",
"id": "3ab7b84b4f993e368911d822f05a1fa9a6dac30b",
"size": "161",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/django_project/test_rest/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1840"
},
{
"name": "Python",
"bytes": "5017"
}
],
"symlink_target": ""
}
|
__author__ = 'reggie'
###START-CONF
##{
##"object_name": "StubModel",
##"object_poi": "vph-101",
##"group" : "public",
##"remoting" : "False",
##"parameters": [
## {
## "name": "StubModelParam",
## "description": "data directory tarred",
## "required": true,
## "type": "StringFileTar",
## "format": "FileString",
## "state" : "XSimX1X2"
## }
## ],
##"return": [
## {
## "name": "Xsim",
## "description": "returns Xsim output",
## "required": true,
## "type": "StringFileTar",
## "format": "FileString",
## "state" : "XsimOut"
## }
##
## ] }
##END-CONF
import subprocess
import os
import shutil
from pumpkin import PmkSeed
class StubModel(PmkSeed.Seed):
    # Pumpkin seed that stubs an Xsim model run: it takes an incoming
    # tar packet, injects a canned Xsim-output.csv, and dispatches the
    # result in the "XsimOut" state.  (Python 2 code: note the print
    # statements.)
    def __init__(self, context, poi=None):
        PmkSeed.Seed.__init__(self, context,poi)
        # Working directory supplied by the pumpkin runtime context.
        self.wd = self.context.getWorkingDir()
        #Set environment
        #self.env = os.environ.copy()
        #self.env['R_LIBS'] = self.wd
        #self.script_path = self.wd+"/None"
        pass

    def on_load(self):
        # Called by the framework once the seed is loaded.
        print "Loading: " + self.__class__.__name__
        #shutil.copy(self.wd+"DataPacket.pkt", self.wd+"rx/DataPacket.pkt")
        pass

    def split(self, pkt, data):
        """ Split the pkt into many packets to distribute processing.
        In this case we need to untar the X1 X2 Xsim csv files,
        grab a single entry from each file into a new file for X1' X2' Xsim'
        retar the files and dispatch.

        After splitting and dispatching packets. A termination packet needs to be sent with the number
        of packets as its payload like so:

        # lpkt = self.last_fragment_pkt(pkt, frag_no+1)
        # self.dispatch(lpkt, str(frag_no-1), "XSimX1X2", type="FileString")

        Where:
        lpkt is a termination packet
        frag_no is the number of split packets

        When implemented change the return to True else split will not be invoked.
        """
        # Splitting is not implemented for this stub.
        return False

    def run(self, pkt, TarFile):
        # Decompose the incoming file reference into its parts
        # (protocol, relative path, file, absolute/relative paths).
        prot,rel_path,filep,apath,rpath = self.fileparts(TarFile)
        # Output name: <ship>-<container>-<seed name>.
        new_file_name = self.get_ship_id(pkt)+"-"+self.get_cont_id(pkt)+"-"+self.get_name()
        self.logger.debug("Adding stub Xsim-ouput.csv to tar file ["+filep+"]")
        # Inject the canned CSV into the tar under the new name.
        fout = self._add_to_tar("/data/Xsim-output.csv", rpath, rename=new_file_name)
        if fout:
            self.logger.debug("Dispatching file: "+str(fout))
            self.dispatch(pkt,"file://"+str(fout), "XsimOut")
        pass

    def merge(self, pkt, data):
        """ Merging does the opposite of splitting. Merge function is called
        after every run(). To make sure all split packets are received we
        can keep a counter as below. After all packets are received you can
        merge the data into a new packet.

        # if self.is_last_fragment(pkt):
        #     self.last_received = True
        #
        #     # Number of split packets expected
        #     self.exp_pkts = int(data)
        # else:
        #
        #     # Counter to keep track of split packets
        #     self.packt_co += 1
        #     self.str_pkts.append(pkt)
        #
        #     # Merge when all packets received
        #     if self.packt_co == self.exp_pkts:
        #         for spkt in self.str_pkts:
        #             pkt_data = self.get_pkt_data(spkt)
        #             # Do Merging Untat, combine, retar
        #
        #
        #         npkt = self.clean_header(pkt)
        #         self.dispatch(npkt,"file://"+filepath, "XsimOut")
        """
        pass
|
{
"content_hash": "dba869ddd31d29d6e9136394a1b74a40",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 106,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.5163043478260869,
"repo_name": "recap/pumpkin",
"id": "0d1726e53bf2c35fad2e923865ab2c161bb38e75",
"size": "3864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/sa/Models/StubModel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21620"
},
{
"name": "Python",
"bytes": "250918"
},
{
"name": "Shell",
"bytes": "339"
}
],
"symlink_target": ""
}
|
from BinPy import *
from BinPy.analog.base import *
class Resistor(Analog):
    """Analog resistor element.

    Parameter dictionary keys:
        '+' : Connector at the positive-potential end
        '-' : Connector at the negative-potential end
        'r' : resistance value
        'i' : current flowing through the resistor

    Example:
        >>> from BinPy import *
        >>> r = Resistor({'r': 5})
        >>> r.setVoltage(Connector(5), Connector(0))
        {'i': 1.0, '+': 5, 'r': 5, '-': 0}
    """

    def __init__(self, params):
        # Start from neutral defaults, then overlay caller-supplied values.
        self.params = {'+': Connector(0), '-': Connector(0), 'i': 0, 'r': 0}
        self.params.update(params)

    def setResistance(self, value):
        """Set the resistance and recompute the current from the
        present potential difference (Ohm's law)."""
        self.params['r'] = value
        drop = self.params['+'].state - self.params['-'].state
        self.params['i'] = drop / self.params['r']
        return self.params

    def getParams(self):
        """Return the live parameter dictionary."""
        return self.params

    def setCurrent(self, value):
        """Set the current and recompute the positive-end potential."""
        self.params['i'] = value
        self.params['+'].state = (self.params['-'].state
                                  + self.params['i'] * self.params['r'])
        return self.params

    def setVoltage(self, val1, val2):
        """Attach Connector endpoints and recompute the current."""
        if not (isinstance(val1, Connector) and isinstance(val2, Connector)):
            raise Exception(
                "Invalid Voltage Values, Expecting a Connector Class Object")
        self.params['+'] = val1
        self.params['-'] = val2
        drop = self.params['+'].state - self.params['-'].state
        self.params['i'] = drop / self.params['r']
        return self.params

    def __repr__(self):
        return str(self.params['r'])
|
{
"content_hash": "20cf1dc5e5ff60197b1f59976459ca94",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 79,
"avg_line_length": 31.114754098360656,
"alnum_prop": 0.5105374077976818,
"repo_name": "daj0ker/BinPy",
"id": "c944fc76b05f4371965ffe254f27a9e4c8a60c61",
"size": "1898",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "BinPy/analog/components.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "694067"
},
{
"name": "Shell",
"bytes": "2956"
}
],
"symlink_target": ""
}
|
"""Custom request handlers for pushing data to connected clients."""
from asyncio import Queue
import logging
from tornado.web import RequestHandler
from tornado.websocket import WebSocketHandler, WebSocketClosedError
from tornado.iostream import StreamClosedError
from tornado.log import access_log
from . import stores
logger = logging.getLogger("tornadose.handlers")
class BaseHandler(RequestHandler):
    """Base handler for subscribers.

    To be compatible with the data stores defined in
    :mod:`tornadose.stores`, custom handlers should inherit this class
    and implement the :meth:`publish` method.
    """

    def initialize(self, store):
        """Common initialization of handlers happens here.

        If additional initialization is required, this method must
        either be called with ``super`` or the child class must assign
        the ``store`` attribute and register itself with the store.
        """
        assert isinstance(store, stores.BaseStore)
        self.messages = Queue()
        self.store = store
        store.register(self)

    async def submit(self, message):
        """Submit a new message to be published."""
        await self.messages.put(message)

    def publish(self):
        """Push a message to the subscriber; subclasses must override."""
        raise NotImplementedError("publish must be implemented!")
class EventSource(BaseHandler):
    """Handler for server-sent events a.k.a. EventSource.

    The EventSource__ interface has a few advantages over websockets:

    * It is a normal HTTP connection and so can be more easily monitored
      than websockets using tools like curl__ or HTTPie__.
    * Browsers generally try to reestablish a lost connection
      automatically.
    * The publish/subscribe pattern is better suited to some applications
      than the full duplex model of websockets.

    __ https://developer.mozilla.org/en-US/docs/Web/API/EventSource
    __ http://curl.haxx.se/
    __ https://github.com/jkbrzt/httpie
    """

    def initialize(self, store):
        # Register with the store, then mark the response as an
        # uncacheable event stream.
        super(EventSource, self).initialize(store)
        self.finished = False
        self.set_header("content-type", "text/event-stream")
        self.set_header("cache-control", "no-cache")

    def prepare(self):
        """Log access."""
        request_time = 1000.0 * self.request.request_time()
        access_log.info(
            "%d %s %.2fms", self.get_status(), self._request_summary(), request_time
        )

    async def publish(self, message):
        """Pushes data to a listener."""
        try:
            # SSE wire format: "data: <payload>" terminated by a blank line.
            self.write("data: {}\n\n".format(message))
            await self.flush()
        except StreamClosedError:
            # The client disconnected; let the get() pump loop exit.
            self.finished = True

    async def get(self, *args, **kwargs):
        # Pump queued messages to the client until the stream closes,
        # then deregister from the store and finish the response.
        try:
            while not self.finished:
                message = await self.messages.get()
                await self.publish(message)
        except Exception:
            pass
        finally:
            self.store.deregister(self)
            self.finish()
class WebSocketSubscriber(BaseHandler, WebSocketHandler):
    """A Websocket-based subscription handler."""

    def initialize(self, store):
        super(WebSocketSubscriber, self).initialize(store)
        # Set by _close() to stop the open() pump loop.
        self.finished = False

    async def open(self):
        """Register with the publisher."""
        self.store.register(self)
        # Drain the queue, pushing each message until the socket closes.
        while not self.finished:
            message = await self.messages.get()
            await self.publish(message)

    def on_close(self):
        # Tornado callback invoked when the client disconnects.
        self._close()

    def _close(self):
        # Deregister from the store and stop the pump loop.
        self.store.deregister(self)
        self.finished = True

    async def publish(self, message):
        """Push a new message to the client. The data will be
        available as a JSON object with the key ``data``.
        """
        try:
            self.write_message(dict(data=message))
        except WebSocketClosedError:
            self._close()
|
{
"content_hash": "9fde316b4db93bfd319cba6d69d6caaf",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 84,
"avg_line_length": 32.267716535433074,
"alnum_prop": 0.6220107369448511,
"repo_name": "mivade/tornadose",
"id": "42a04ec94085998408cbbc3323b495496277ac03",
"size": "4098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tornadose/handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "471"
},
{
"name": "Python",
"bytes": "15250"
}
],
"symlink_target": ""
}
|
__revision__ = "$Id: acspyTestUnitACSHandler.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $"
#--REGULAR IMPORTS-------------------------------------------------------------
import unittest
import mock
from time import gmtime,sleep
import logging
#--ACS IMPORTS____-------------------------------------------------------------
def dummyACSLogSvc():
    """Stand-in for the CORBA log-service accessor.

    Simulates the "no log service available" condition by always
    returning None.
    """
    return None
import ACSLog
import Acspy.Util.ACSCorba
Acspy.Util.ACSCorba.acsLogSvc = dummyACSLogSvc
import Acspy.Common.ACSHandler as ACSHandler
# In order to run without actually using the CORBA interfaces, we need to
# create a mock ACSLog.LogSvc object.
## methdict = {}
## for meth in [x for x in ACSLog._objref_LogSvc.__methods__]:
## methdict[meth] = None
## methdict['__repr__'] = "ACSLog.LogSvc"
## methdict['__str__'] = "ACSLog.LogSvc"
mockLogSvc = mock.Mock(spec=ACSLog._objref_LogSvc)
# Replacing the acsLogSvc call ensures that we are using the mock object
# in all situations
def mockACSLogSvc():
    """Return the shared mock ACSLog.LogSvc instance used by these tests."""
    return mockLogSvc
class ACSFormatterCheck(unittest.TestCase):
    """Test of the ACSFormatter class."""
    def setUp(self):
        # A fresh formatter for every test.
        self.f = ACSHandler.ACSFormatter()
    def testConstructor(self):
        """ACSFormatter initialized"""
        self.assertEqual("%(asctime)s.%(msecs)03d %(name)s %(message)s", self.f._fmt)
        self.assertEqual("%Y-%m-%dT%H:%M:%S", self.f.datefmt)
        # Timestamps are converted with time.gmtime, i.e. rendered in UTC.
        self.assertEqual(gmtime, self.f.converter)
    def testFormatNoData(self):
        """ACSFormatter formats a log record that has no data attribute correctly"""
        lr = ACSHandler.ACSLogRecord("Simple", "TRACE", "/path/to/file.py", 100, "Test text", [], None)
        # Pin the record time to the epoch so the formatted timestamp is stable.
        lr.created = 0
        lr.msecs = 0
        s = self.f.format(lr)
        self.assertEqual("1970-01-01T00:00:00.000 Simple Test text",s)
    def testFormatNoneData(self):
        """ACSFormatter formats a log record whose data attribute is None correctly"""
        lr = ACSHandler.ACSLogRecord("Simple", "TRACE", "/path/to/file.py", 100, "Test text", [], None)
        lr.created = 0
        lr.msecs = 0
        lr.data = None
        s = self.f.format(lr)
        # A present-but-None data attribute still yields the empty "[ ]" suffix.
        self.assertEqual("1970-01-01T00:00:00.000 Simple Test text [ ]",s)
    def testFormatEmptyDataDict(self):
        """ACSFormatter formats a log record with an empty data dictionary attribute correctly"""
        lr = ACSHandler.ACSLogRecord("Simple", "TRACE", "/path/to/file.py", 100, "Test text", [], None)
        lr.created = 0
        lr.msecs = 0
        lr.data = {}
        s = self.f.format(lr)
        self.assertEqual("1970-01-01T00:00:00.000 Simple Test text [ ]",s)
    def testFormatEmptyDataList(self):
        """ACSFormatter formats a log record with an empty data list attribute correctly"""
        lr = ACSHandler.ACSLogRecord("Simple", "TRACE", "/path/to/file.py", 100, "Test text", [], None)
        lr.created = 0
        lr.msecs = 0
        lr.data = []
        s = self.f.format(lr)
        self.assertEqual("1970-01-01T00:00:00.000 Simple Test text [ ]",s)
    def testFormatDataDict(self):
        """ACSFormatter formats a log record with a data dictionary attribute correctly"""
        lr = ACSHandler.ACSLogRecord("Simple", "TRACE", "/path/to/file.py", 100, "Test text", [], None)
        lr.created = 0
        lr.msecs = 0
        lr.data = { 'a' : 'A', 5 : '5', 'B' : 9 }
        s = self.f.format(lr)
        # NOTE(review): the expected ordering "a=A B=9 5=5" depends on dict
        # iteration order of the interpreter this test targets — TODO confirm.
        self.assertEqual("1970-01-01T00:00:00.000 Simple Test text [ a=A B=9 5=5 ]",s)
    def testFormatDataList(self):
        """ACSFormatter formats a log record with a data list attribute correctly"""
        lr = ACSHandler.ACSLogRecord("Simple", "TRACE", "/path/to/file.py", 100, "Test text", [], None)
        lr.created = 0
        lr.msecs = 0
        # NVPair data preserves the list order in the formatted output.
        lr.data = [ ACSLog.NVPair('a', 'A'), ACSLog.NVPair('5', '5'), ACSLog.NVPair('B', '9') ]
        s = self.f.format(lr)
        self.assertEqual("1970-01-01T00:00:00.000 Simple Test text [ a=A 5=5 B=9 ]",s)
class ACSLogRecordCheck(unittest.TestCase):
    """Test of the ACSLogRecord class."""

    @staticmethod
    def _make_record(logger_name):
        # Build a record with fixed location/message arguments so each test
        # only varies the logger name.
        return ACSHandler.ACSLogRecord(logger_name, "TRACE", "/path/to/file.py",
                                       100, "Test text", [], None)

    def testSimpleConstructor(self):
        """ACSLogRecord initialized with simple name"""
        record = self._make_record("Simple")
        self.assertEqual("Simple", record.name)
        self.assertEqual(record.name, record.source)
        self.assertEqual("/path/to/file.py", record.pathname)
        self.assertEqual("file.py", record.filename)
        self.assertEqual("file", record.module)
        self.assertEqual("Test text", record.msg)
        self.assertEqual([], record.args)
        self.assertEqual(None, record.exc_info)
        self.assertEqual(None, record.funcName)

    def testNestedConstructor(self):
        """ACSLogRecord initialized with nested name"""
        record = self._make_record("Nested.Name")
        # A dotted name is split: last component is the name, prefix the source.
        self.assertEqual("Name", record.name)
        self.assertEqual("Nested", record.source)
class ACSHandlerCheck(unittest.TestCase):
    """Test of the ACSHandler class.

    These tests monkey-patch module-level attributes and methods of
    ACSHandler; every test that patches a method saves the original and
    restores it before returning.
    """
    def setUp(self):
        # Route CORBA log-service lookups to the shared mock service.
        ACSHandler.acsLogSvc = mockACSLogSvc
    def tearDown(self):
        pass
    def testConstructorDefault(self):
        """ACSHandler initialized"""
        h = ACSHandler.ACSHandler()
        self.assertEqual(ACSHandler.DEFAULT_MAXIMUM_QUEUE, h.capacity)
        self.assertEqual(ACSHandler.DEFAULT_RECORD_CAPACITY, h.batchsize)
        self.assertEqual(ACSHandler.DEFAULT_IMMEDIATE_DISPATCH, h.dispatchlevel)
        self.assertEqual(mockLogSvc.__repr__(), h.logSvc.__repr__())
    def testConstructorNoFlushThread(self):
        """ACSHandler initialized with no periodic flush thread"""
        h = ACSHandler.ACSHandler()
        self.assertEqual(False, 'sched' in h.__dict__)
    def testConstructorNoLogSvc(self):
        """ACSHandler initialized when acsLogSvc not available"""
        # Simulate an unavailable CORBA service; handler must still build.
        ACSHandler.acsLogSvc = dummyACSLogSvc
        h = ACSHandler.ACSHandler()
        self.assertEqual(ACSHandler.DEFAULT_MAXIMUM_QUEUE, h.capacity)
        self.assertEqual(None, h.logSvc)
    def testGetCORBALoggerCache(self):
        """ACSHandler returns cached CORBA logging service"""
        h = ACSHandler.ACSHandler()
        self.assertEqual(mockLogSvc.__repr__(), h.getCORBALogger().__repr__())
    def testGetCORBALoggerCacheReload(self):
        """ACSHandler reloads CORBA logging service when cache is empty"""
        h = ACSHandler.ACSHandler()
        # Empty the cache; the next lookup should repopulate it.
        h.logSvc = None
        self.assertEqual(mockLogSvc.__repr__(), h.getCORBALogger().__repr__())
        self.assertEqual(mockLogSvc.__repr__(), h.logSvc.__repr__())
    def testSendLog(self):
        """ACSHandler sends Log messages at the appropriate levels"""
        import Acspy.Common.Log
        h = ACSHandler.ACSHandler()
        # Make logging calls for all known logging levels except OFF and NOTSET
        expected = []
        keys = Acspy.Common.Log.LEVELS.keys()
        keys.sort()
        for l in keys:
            if l == 0 or l == 99:
                # 0 (NOTSET) and 99 (OFF) never reach the log service.
                continue
            else:
                if l in [7, 8, ACSLog.ACS_LOG_ERROR]:
                    expected.append("logWithAudience")
                else:
                    expected.append("log" + Acspy.Common.Log.getLevelName(l).capitalize())
            lr = ACSHandler.ACSLogRecord("Sample", Acspy.Common.Log.LEVELS[l], "/path/to/file.py", 100, expected[-1], None, None)
            h.sendLog(lr)
        # Append an unknown level
        expected.append("logCritical")
        lr = ACSHandler.ACSLogRecord("Sample", 75, "/path/to/file.py", 100, "Unknown", None, None)
        h.sendLog(lr)
        # Compare against the most recent calls recorded on the mock service.
        self.assertEquals(expected, [ n[0] for n in mockLogSvc.method_calls[-len(keys)+1:]])
    def testSendLogErrorTrace(self):
        """ACSHandler sends errortrace messages at the appropriate levels"""
        import Acspy.Common.Log
        import Acspy.Common.ErrorTrace as ErrorTrace
        l = Acspy.Common.Log.Logger('test')
        h = ACSHandler.ACSHandler()
        # An 'errortrace' extra routes the record to logErrorWithPriority.
        et = ErrorTrace.ErrorTrace(1,1)
        extras = { 'errortrace' : et, 'priority' : ACSLog.ACS_LOG_ERROR }
        lr = l.makeRecord("Sample", Acspy.Common.Log.LEVELS[8], "/path/to/file.py",
                          100, 'ErrorTrace', (), None, 'dummy', extras)
        h.sendLog(lr)
        logcall = mockLogSvc.method_calls[-1]
        self.assertEquals("logErrorWithPriority", logcall[0])
        self.assertEquals(ACSLog.ACS_LOG_ERROR, logcall[1][1])
    def testSendLogWithAudience(self):
        """ACSHandler sends logWithAudience messages at the appropriate levels"""
        import Acspy.Common.Log
        l = Acspy.Common.Log.Logger('test')
        h = ACSHandler.ACSHandler()
        # Audience extras (no errortrace) route to logWithAudience.
        extras = { 'priority' : ACSLog.ACS_LOG_ERROR, 'audience' : "", 'array' : "", 'antenna' : "" }
        lr = l.makeRecord("Sample", Acspy.Common.Log.LEVELS[8], "/path/to/file.py",
                          100, 'ErrorTrace', (), None, 'dummy', extras)
        h.sendLog(lr)
        logcall = mockLogSvc.method_calls[-1]
        self.assertEquals("logWithAudience", logcall[0])
        self.assertEquals(ACSLog.ACS_LOG_ERROR, logcall[1][0])
    def testSendLogWithPriority(self):
        """ACSHandler sends logWithPriority messages at the appropriate levels"""
        import Acspy.Common.Log
        l = Acspy.Common.Log.Logger('test')
        h = ACSHandler.ACSHandler()
        # Providing explicit runtime context and source info: they must be
        # passed through unchanged.
        ctxt = ACSLog.RTContext('a', 'b', 'c', 'd', 'e')
        srcinf = ACSLog.SourceInfo('a', 'b', 'c')
        extras = { 'priority' : ACSLog.ACS_LOG_ERROR, 'data': [], 'rtCont' : ctxt, 'srcInfo' : srcinf, 'audience' : "", 'array' : "", 'antenna' : "" }
        lr = l.makeRecord("Sample", Acspy.Common.Log.LEVELS[8], "/path/to/file.py",
                          100, 'ErrorTrace', (), None, 'dummy', extras)
        h.sendLog(lr)
        logcall = mockLogSvc.method_calls[-1]
        self.assertEquals("logWithPriority", logcall[0])
        self.assertEquals(ACSLog.ACS_LOG_ERROR, logcall[1][0])
        self.assertEquals(True, isinstance(logcall[1][3], ACSLog.RTContext))
        self.assertEquals(ctxt, logcall[1][3])
        self.assertEquals(True, isinstance(logcall[1][4], ACSLog.SourceInfo))
        self.assertEquals(srcinf, logcall[1][4])
    def testSendLogWithPriorityNoContextorSource(self):
        """ACSHandler sends logWithPriority messages at the appropriate levels without provided context or source"""
        import Acspy.Common.Log
        l = Acspy.Common.Log.Logger('test')
        h = ACSHandler.ACSHandler()
        # No rtCont/srcInfo extras: the handler must supply default instances.
        extras = { 'priority' : ACSLog.ACS_LOG_ERROR, 'data': [], 'audience' : "", 'array' : "", 'antenna' : "" }
        lr = l.makeRecord("Sample", Acspy.Common.Log.LEVELS[8], "/path/to/file.py",
                          100, 'ErrorTrace', (), None, 'dummy', extras)
        h.sendLog(lr)
        logcall = mockLogSvc.method_calls[-1]
        self.assertEquals("logWithPriority", logcall[0])
        self.assertEquals(ACSLog.ACS_LOG_ERROR, logcall[1][0])
        self.assertEquals(True, isinstance(logcall[1][3], ACSLog.RTContext))
        self.assertEquals(True, isinstance(logcall[1][4], ACSLog.SourceInfo))
    def testSendLogWithPriorityBadContextandSource(self):
        """ACSHandler sends logWithPriority messages at the appropriate levels with bad context"""
        import Acspy.Common.Log
        l = Acspy.Common.Log.Logger('test')
        h = ACSHandler.ACSHandler()
        # Explicit None rtCont/srcInfo must be replaced, not passed through.
        extras = { 'priority' : ACSLog.ACS_LOG_ERROR, 'data': [], 'rtCont': None, 'srcInfo' : None, 'audience' : "", 'array' : "", 'antenna' : "" }
        lr = l.makeRecord("Sample", Acspy.Common.Log.LEVELS[8], "/path/to/file.py",
                          100, 'ErrorTrace', (), None, 'dummy', extras)
        h.sendLog(lr)
        logcall = mockLogSvc.method_calls[-1]
        self.assertEquals("logWithPriority", logcall[0])
        self.assertEquals(ACSLog.ACS_LOG_ERROR, logcall[1][0])
        self.assertEquals(True, isinstance(logcall[1][3], ACSLog.RTContext))
        self.assertEquals(False, logcall[1][3] is None)
        self.assertEquals(True, isinstance(logcall[1][4], ACSLog.SourceInfo))
        self.assertEquals(False, logcall[1][4] is None)
    def testShouldFlushCapacity(self):
        """ACSHandler flushes when capacity is reached"""
        h = ACSHandler.ACSHandler()
        lr = ACSHandler.ACSLogRecord("Nested.Name", logging.NOTSET+1, "/path/to/file.py", 100, "Test text", [], None)
        self.assertEqual(False, h.shouldFlush(lr))
        # Fill the buffer with ten records so the batch threshold is crossed.
        h.buffer += [ lr, lr, lr, lr, lr, lr, lr, lr, lr, lr ]
        self.assertEqual(True, h.shouldFlush(lr))
    def testShouldFlushPriority(self):
        """ACSHandler flushes when high priority message is received"""
        h = ACSHandler.ACSHandler()
        hr = ACSHandler.ACSLogRecord("High.Name", logging.CRITICAL+2, "/path/to/file.py", 100, "High Test text", [], None)
        self.assertEqual(True, h.shouldFlush(hr))
    def testFlushLevel(self):
        """ACSHandler flushes buffer when message priority greater than threshold arrives"""
        h = ACSHandler.ACSHandler()
        lr = ACSHandler.ACSLogRecord("Nested.Name", logging.INFO, "/path/to/file.py", 100, "Test text", [], None)
        self.assertEqual(False, h.shouldFlush(lr))
        lr = ACSHandler.ACSLogRecord("Nested.Name", logging.CRITICAL+1, "/path/to/file.py", 100, "Test text", [], None)
        self.assertEqual(True, h.shouldFlush(lr))
    def testFlushToFile(self):
        """ACSHandler writes log messages to a file"""
        h = ACSHandler.ACSHandler()
        # Replace the file handler with a mock so no file is actually written.
        h.file_handler = mock.Mock(spec=logging.Handler)
        lr = ACSHandler.ACSLogRecord("Nested.Name", "TRACE", "/path/to/file.py", 100, "Test text", [], None)
        h.flushToFile(lr)
        self.assertEqual('handle',h.file_handler.method_calls[0][0])
    def testFlushToFileNoHandler(self):
        """ACSHandler creates singleton file handler when necessary"""
        # Patch initFileHandler so the lazily-created handler is a mock;
        # the original method is restored at the end of the test.
        def mockInitFileHandler(self):
            self.file_handler = mock.Mock(spec=logging.FileHandler)
        holdmethod = ACSHandler.ACSHandler.initFileHandler
        ACSHandler.ACSHandler.initFileHandler = mockInitFileHandler
        h = ACSHandler.ACSHandler()
        lr = ACSHandler.ACSLogRecord("Nested.Name", "TRACE", "/path/to/file.py", 100, "Test text", [], None)
        self.assertEqual(True, h.file_handler is None)
        h.flushToFile(lr)
        self.assertEqual(False, h.file_handler is None)
        ACSHandler.ACSHandler.initFileHandler = holdmethod
    def testFlush(self):
        """ACSHandler flushes buffer correctly"""
        h = ACSHandler.ACSHandler()
        lr = ACSHandler.ACSLogRecord("Nested.Name", logging.INFO, "/path/to/file.py", 100, "Test text", None, None)
        h.buffer = [ lr, lr ]
        expected = [ 'logInfo' , 'logInfo' ]
        h.flush()
        self.assertEquals(expected, [ n[0] for n in mockLogSvc.method_calls[-2:]])
    def testFlushException(self):
        """ACSHandler handles exceptions correctly when flushing buffer"""
        # Force sendLog to fail so flush() must fall back to the file handler;
        # the original method is restored at the end of the test.
        def mockSendLog(self, record): raise Exception()
        holdSendLog = ACSHandler.ACSHandler.sendLog
        ACSHandler.ACSHandler.sendLog = mockSendLog
        h = ACSHandler.ACSHandler()
        h.file_handler = mock.Mock(spec=logging.Handler)
        lr = ACSHandler.ACSLogRecord("Nested.Name", logging.INFO, "/path/to/file.py", 100, "Test text", None, None)
        h.buffer = [ lr, lr ]
        expected = [ 'handle' , 'handle' ]
        h.flush()
        self.assertEquals(expected, [ n[0] for n in h.file_handler.method_calls[-2:]])
        ACSHandler.ACSHandler.sendLog = holdSendLog
    def testFullLogQueue(self):
        """ACSHandler drops new messages when pending message queue is full"""
        # Disable flushing so the buffer stays full for the whole test;
        # the original method is restored at the end.
        def mockFlush(self): return 0
        holdFlush = ACSHandler.ACSHandler.flush
        ACSHandler.ACSHandler.flush = mockFlush
        h = ACSHandler.ACSHandler(capacity=ACSHandler.DEFAULT_RECORD_CAPACITY)
        lr = ACSHandler.ACSLogRecord("Nested.Name", logging.INFO, "/path/to/file.py", 100, "Test text", None, None)
        lrn = ACSHandler.ACSLogRecord("Name", logging.INFO, "/path/to/file.py", 100, "Test text", None, None)
        h.buffer += [ lr, lr, lr, lr, lr, lr, lr, lr, lr, lr ]
        self.assertEqual([ lr, lr, lr, lr, lr, lr, lr, lr, lr, lr ], h.buffer)
        # The new record lrn must be discarded, leaving the buffer unchanged.
        h.handle(lrn)
        self.assertEqual([ lr, lr, lr, lr, lr, lr, lr, lr, lr, lr ], h.buffer)
        ACSHandler.ACSHandler.flush = holdFlush
    def testFullLogQueueFilter(self):
        """ACSHandler drops low priority messages when message queue is full"""
        # Disable the flush trigger so handle() must filter instead;
        # the original method is restored at the end.
        def mockShouldFlush(self, record): return 0
        holdShouldFlush = ACSHandler.ACSHandler.shouldFlush
        ACSHandler.ACSHandler.shouldFlush = mockShouldFlush
        h = ACSHandler.ACSHandler(capacity=ACSHandler.DEFAULT_RECORD_CAPACITY)
        lra = ACSHandler.ACSLogRecord("Nested.Name", logging.NOTSET+1, "/path/to/file.py", 100, "Test text", None, None)
        lrb = ACSHandler.ACSLogRecord("Name", logging.DEBUG, "/path/to/file.py", 100, "Test text", None, None)
        lrc = ACSHandler.ACSLogRecord("Name", logging.INFO, "/path/to/file.py", 100, "Test text", None, None)
        lrd = ACSHandler.ACSLogRecord("Name", logging.ERROR, "/path/to/file.py", 100, "Test text", None, None)
        h.buffer += [ lrd, lra, lrc, lrb, lrd, lrb, lra, lrc, lra, lrd ]
        self.assertEqual([ lrd, lra, lrc, lrb, lrd, lrb, lra, lrc, lra, lrd ], h.buffer)
        # Per the expected list, the low-priority records (lra, lrb) are
        # evicted while the INFO and ERROR records survive.
        h.handle(lrd)
        self.assertEqual([ lrd, lrc, lrd, lrc, lrd, lrd ], h.buffer)
        ACSHandler.ACSHandler.shouldFlush = holdShouldFlush
def suite():
    """Aggregate all test cases in this module into one TestSuite."""
    cases = (ACSFormatterCheck, ACSLogRecordCheck, ACSHandlerCheck)
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
if __name__ == "__main__":
unittest.main(defaultTest='suite')
#
# ___oOo___
|
{
"content_hash": "06ac4dbb11cf743ee1f11147d767b027",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 150,
"avg_line_length": 47.10498687664042,
"alnum_prop": 0.6233910960049033,
"repo_name": "csrg-utfsm/acscb",
"id": "2c64b1dbc94f624389de33e0aa122d29cd453069",
"size": "19164",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "LGPL/CommonSoftware/acspycommon/test/acspyTestUnitACSHandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "633"
},
{
"name": "Batchfile",
"bytes": "2346"
},
{
"name": "C",
"bytes": "751150"
},
{
"name": "C++",
"bytes": "7892598"
},
{
"name": "CSS",
"bytes": "21364"
},
{
"name": "Elixir",
"bytes": "906"
},
{
"name": "Emacs Lisp",
"bytes": "1990066"
},
{
"name": "FreeMarker",
"bytes": "7369"
},
{
"name": "GAP",
"bytes": "14867"
},
{
"name": "Gnuplot",
"bytes": "437"
},
{
"name": "HTML",
"bytes": "1857062"
},
{
"name": "Haskell",
"bytes": "764"
},
{
"name": "Java",
"bytes": "13573740"
},
{
"name": "JavaScript",
"bytes": "19058"
},
{
"name": "Lex",
"bytes": "5101"
},
{
"name": "Makefile",
"bytes": "1624406"
},
{
"name": "Module Management System",
"bytes": "4925"
},
{
"name": "Objective-C",
"bytes": "3223"
},
{
"name": "PLSQL",
"bytes": "9496"
},
{
"name": "Perl",
"bytes": "120411"
},
{
"name": "Python",
"bytes": "4191000"
},
{
"name": "Roff",
"bytes": "9920"
},
{
"name": "Shell",
"bytes": "1198375"
},
{
"name": "Smarty",
"bytes": "21615"
},
{
"name": "Tcl",
"bytes": "227078"
},
{
"name": "XSLT",
"bytes": "100454"
},
{
"name": "Yacc",
"bytes": "5006"
}
],
"symlink_target": ""
}
|
import sys
class StripeError(Exception):
    """Base class for every error raised by these Stripe bindings."""

    def __init__(self, message=None, http_body=None, http_status=None,
                 json_body=None, headers=None):
        super(StripeError, self).__init__(message)

        # Normalize a bytes body to text; substitute a placeholder when
        # the payload cannot be decoded as UTF-8.
        if http_body and hasattr(http_body, 'decode'):
            try:
                http_body = http_body.decode('utf-8')
            except BaseException:
                http_body = ('<Could not decode body as utf-8. '
                             'Please report to support@stripe.com>')

        self._message = message
        self.http_body = http_body
        self.http_status = http_status
        self.json_body = json_body
        self.headers = headers or {}
        self.request_id = self.headers.get('request-id', None)

    def __unicode__(self):
        if self.request_id is None:
            return self._message
        msg = self._message or "<empty message>"
        return u"Request {0}: {1}".format(self.request_id, msg)

    # Python 2/3 string compatibility: __str__ must return native str.
    if sys.version_info > (3, 0):
        def __str__(self):
            return self.__unicode__()
    else:
        def __str__(self):
            return unicode(self).encode('utf-8')
class APIError(StripeError):
    """Generic server-side error reported by the Stripe API."""


class APIConnectionError(StripeError):
    """Raised when the client cannot reach the Stripe API at all."""
class CardError(StripeError):
    """Raised when a card cannot be charged.

    Carries the offending request parameter and the decline code in
    addition to the base error fields.
    """

    def __init__(self, message, param, code, http_body=None,
                 http_status=None, json_body=None, headers=None):
        self.param = param
        self.code = code
        super(CardError, self).__init__(
            message, http_body, http_status, json_body, headers)
class InvalidRequestError(StripeError):
    """Raised when a request has invalid parameters.

    Remembers which parameter was invalid in ``param``.
    """

    def __init__(self, message, param, http_body=None,
                 http_status=None, json_body=None, headers=None):
        self.param = param
        super(InvalidRequestError, self).__init__(
            message, http_body, http_status, json_body, headers)
class AuthenticationError(StripeError):
    """Raised when the API key is missing or invalid."""


# NOTE(review): this name shadows Python 3's builtin PermissionError.
# It is part of the public API surface, so it is kept as-is.
class PermissionError(StripeError):
    """Raised when the API key lacks permission for the request."""


class RateLimitError(StripeError):
    """Raised when too many requests hit the API too quickly."""
class OAuthError(StripeError):
    """Raised for errors returned by Stripe's OAuth endpoints."""

    def __init__(self, type, description=None, http_body=None,
                 http_status=None, json_body=None, headers=None):
        # Fall back to the error type when no human-readable description
        # was supplied.
        message = description or type
        super(OAuthError, self).__init__(
            message, http_body, http_status, json_body, headers)
        self.type = type
|
{
"content_hash": "7706e683f36a6f24645a8f2db6ea2b75",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 70,
"avg_line_length": 27.86046511627907,
"alnum_prop": 0.5847245409015025,
"repo_name": "lextoumbourou/txstripe",
"id": "83a98f3cd97a8a4b7e4a9d578ac00a6b93df0ff1",
"size": "2409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stripe/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "257331"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
from frappe.utils import strip
from frappe.website.doctype.website_theme.website_theme import get_active_theme
no_sitemap = 1
base_template_path = "templates/www/website_script.js"
def get_context(context):
    """Populate *context* for the site-wide custom script page.

    Appends the active website theme's js to the "Website Script" single
    doctype's javascript, and (outside developer mode) injects the Google
    Analytics id from Website Settings or the site config.
    """
    context.javascript = frappe.db.get_single_value(
        'Website Script', 'javascript') or ""

    active_theme = get_active_theme()
    theme_js = strip((active_theme and active_theme.js) or "")
    if theme_js:
        context.javascript = context.javascript + "\n" + theme_js

    if not frappe.conf.developer_mode:
        analytics_id = (
            frappe.db.get_single_value("Website Settings", "google_analytics_id")
            or frappe.conf.get("google_analytics_id"))
        context["google_analytics_id"] = analytics_id
|
{
"content_hash": "d57d745c4e02f7795c21b0f758d51191",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 105,
"avg_line_length": 32.5,
"alnum_prop": 0.7369230769230769,
"repo_name": "paurosello/frappe",
"id": "0db00bc3d879f6f18288b99c94b9fe9f94318ee1",
"size": "751",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "frappe/www/website_script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "422931"
},
{
"name": "HTML",
"bytes": "202357"
},
{
"name": "JavaScript",
"bytes": "1858011"
},
{
"name": "Makefile",
"bytes": "29"
},
{
"name": "Python",
"bytes": "2042290"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
"""Runs the cluster insight data collector in master mode.
Collects context metadata from multiple places and computes a graph from it.
"""
import argparse
import logging
import sys
import flask
from flask_cors import CORS
# local imports
import collector_error
import constants
import context
import docker
import global_state
import kubernetes
import utilities
app = flask.Flask(__name__)
# enable cross-origin resource sharing (CORS) HTTP headers on all routes
cors = CORS(app)
def valid_id(x):
    """Tests whether 'x' is a valid resource identifier.

    A valid resource identifier is either None (which means you refer to every
    resource) or a non-empty string.

    Args:
      x: a resource identifier or None.

    Returns:
      True iff 'x' is a valid resource identifier.
    """
    return utilities.valid_optional_string(x)
def return_elapsed(gs):
    """Returns a description of the elapsed time of recent operations.

    Args:
      gs: global state.

    Returns:
      A dictionary containing the count, minimum elapsed time,
      maximum elapsed time, average elapsed time, and list of elapsed time
      records.
    """
    assert isinstance(gs, global_state.GlobalState)
    items = []
    total_seconds = 0.0
    minimum = None
    maximum = None
    for record in gs.get_elapsed():
        seconds = record.elapsed_seconds
        items.append(
            {'start_time': utilities.seconds_to_timestamp(record.start_time),
             'what': record.what,
             'threadIdentifier': record.thread_identifier,
             'elapsed_seconds': seconds})
        total_seconds += seconds
        # Seed min/max from the first record, then fold in the rest.
        minimum = seconds if minimum is None else min(minimum, seconds)
        maximum = seconds if maximum is None else max(maximum, seconds)
    return {'count': len(items),
            'min': minimum,
            'max': maximum,
            'average': total_seconds / len(items) if items else None,
            'items': items}
@app.route('/', methods=['GET'])
def home():
    """Returns the response of the '/' endpoint.

    Returns:
      The home page of the Cluster-Insight data collector.
    """
    # Serve the static landing page bundled with the collector.
    return flask.send_from_directory('static', 'home.html')
@app.route('/cluster/resources/nodes', methods=['GET'])
def get_nodes():
    """Computes the response of the '/cluster/resources/nodes' endpoint.

    Returns:
      The nodes of the context graph, or an error response.
    """
    gs = app.context_graph_global_state
    try:
        nodes_list = kubernetes.get_nodes_with_metrics(gs)
    except collector_error.CollectorError as e:
        return flask.jsonify(utilities.make_error(str(e)))
    # Catch Exception rather than a bare 'except:' so SystemExit and
    # KeyboardInterrupt can still propagate and stop the server.
    except Exception:
        msg = 'kubernetes.get_nodes() failed with exception %s' % sys.exc_info()[0]
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
    return flask.jsonify(utilities.make_response(nodes_list, 'resources'))
@app.route('/cluster/resources/services', methods=['GET'])
def get_services():
    """Computes the response of the '/cluster/resources/services' endpoint.

    Returns:
      The services of the context graph, or an error response.
    """
    gs = app.context_graph_global_state
    try:
        services_list = kubernetes.get_services(gs)
    except collector_error.CollectorError as e:
        return flask.jsonify(utilities.make_error(str(e)))
    # Not a bare 'except:' — SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        msg = ('kubernetes.get_services() failed with exception %s' %
               sys.exc_info()[0])
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
    return flask.jsonify(utilities.make_response(services_list, 'resources'))
@app.route('/cluster/resources/rcontrollers', methods=['GET'])
def get_rcontrollers():
    """Computes the response of accessing the '/cluster/resources/rcontrollers'.

    Returns:
      The replication controllers of the context graph, or an error response.
    """
    gs = app.context_graph_global_state
    try:
        rcontrollers_list = kubernetes.get_rcontrollers(gs)
    except collector_error.CollectorError as e:
        return flask.jsonify(utilities.make_error(str(e)))
    # Not a bare 'except:' — SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        msg = ('kubernetes.get_rcontrollers() failed with exception %s' %
               sys.exc_info()[0])
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
    return flask.jsonify(utilities.make_response(rcontrollers_list, 'resources'))
@app.route('/cluster/resources/pods', methods=['GET'])
def get_pods():
    """Computes the response of the '/cluster/resources/pods' endpoint.

    Returns:
      The pods of the context graph, or an error response.
    """
    gs = app.context_graph_global_state
    try:
        pods_list = kubernetes.get_pods(gs, None)
    except collector_error.CollectorError as e:
        return flask.jsonify(utilities.make_error(str(e)))
    # Not a bare 'except:' — SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        msg = 'kubernetes.get_pods() failed with exception %s' % sys.exc_info()[0]
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
    return flask.jsonify(utilities.make_response(pods_list, 'resources'))
@app.route('/cluster/resources/containers', methods=['GET'])
def get_containers():
    """Computes the response of the '/cluster/resources/containers' endpoint.

    Returns:
      The containers of the context graph, or an error response.
    """
    containers = []
    gs = app.context_graph_global_state
    try:
        for node in kubernetes.get_nodes(gs):
            # The node_id is the Docker host name.
            docker_host = node['id']
            containers.extend(docker.get_containers_with_metrics(gs, docker_host))
    except collector_error.CollectorError as e:
        return flask.jsonify(utilities.make_error(str(e)))
    # Not a bare 'except:' — SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        msg = 'get_containers() failed with exception %s' % sys.exc_info()[0]
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
    return flask.jsonify(utilities.make_response(containers, 'resources'))
@app.route('/cluster/resources/processes', methods=['GET'])
def get_processes():
    """Computes the response of the '/cluster/resources/processes' endpoint.

    Returns:
      The processes of the context graph, or an error response.
    """
    processes = []
    gs = app.context_graph_global_state
    try:
        # Walk every container on every node and collect its processes.
        for node in kubernetes.get_nodes(gs):
            node_id = node['id']
            docker_host = node_id
            for container in docker.get_containers(gs, docker_host):
                container_id = container['id']
                processes.extend(docker.get_processes(gs, docker_host, container_id))
    except collector_error.CollectorError as e:
        return flask.jsonify(utilities.make_error(str(e)))
    # Not a bare 'except:' — SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        msg = 'get_processes() failed with exception %s' % sys.exc_info()[0]
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
    return flask.jsonify(utilities.make_response(processes, 'resources'))
@app.route('/cluster/resources/images', methods=['GET'])
def get_images():
    """Computes the response of the '/cluster/resources/images' endpoint.

    Returns:
      The images of the context graph, or an error response.
    """
    gs = app.context_graph_global_state
    # A dictionary from Image ID to wrapped image objects.
    # If an image appears more than once, keep only its latest value.
    images_dict = {}
    try:
        for node in kubernetes.get_nodes(gs):
            for image in docker.get_images(gs, node['id']):
                images_dict[image['id']] = image
    except collector_error.CollectorError as e:
        return flask.jsonify(utilities.make_error(str(e)))
    # Not a bare 'except:' — SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        msg = 'kubernetes.get_images() failed with exception %s' % sys.exc_info()[0]
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
    # The images list is sorted by increasing identifiers.
    images_list = [images_dict[key] for key in sorted(images_dict.keys())]
    return flask.jsonify(utilities.make_response(images_list, 'resources'))
@app.route('/debug', methods=['GET'])
def get_debug():
    """Computes the response of the '/debug' endpoint.

    Returns:
      The DOT graph depicting the context graph, or an error response.
    """
    gs = app.context_graph_global_state
    try:
        return context.compute_graph(gs, 'dot')
    except collector_error.CollectorError as e:
        return flask.jsonify(utilities.make_error(str(e)))
    # Not a bare 'except:' — SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        msg = ('compute_graph(\"dot\") failed with exception %s' %
               sys.exc_info()[0])
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
@app.route('/cluster/resources', methods=['GET'])
def get_resources():
    """Computes the response of the '/cluster/resources' endpoint.

    Returns:
      The 'resources' section of the context graph, or an error response.
    """
    gs = app.context_graph_global_state
    try:
        response = context.compute_graph(gs, 'resources')
        return flask.jsonify(response)
    except collector_error.CollectorError as e:
        return flask.jsonify(utilities.make_error(str(e)))
    # Not a bare 'except:' — SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        msg = ('compute_graph(\"resources\") failed with exception %s' %
               sys.exc_info()[0])
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
@app.route('/cluster', methods=['GET'])
def get_cluster():
    """Computes the response of the '/cluster' endpoint.

    Returns:
      The entire context graph, or an error response.
    """
    gs = app.context_graph_global_state
    try:
        response = context.compute_graph(gs, 'context_graph')
        return flask.jsonify(response)
    except collector_error.CollectorError as e:
        return flask.jsonify(utilities.make_error(str(e)))
    # Not a bare 'except:' — SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        msg = ('compute_graph(\"context_graph\") failed with exception %s' %
               sys.exc_info()[0])
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
@app.route('/version', methods=['GET'])
def get_version():
    """Computes the response of the '/version' endpoint.

    Returns:
      The value of docker.get_version() or an error message.
    """
    gs = app.context_graph_global_state
    try:
        version = docker.get_version(gs)
        return flask.jsonify(utilities.make_response(version, 'version'))
    except collector_error.CollectorError as e:
        return flask.jsonify(utilities.make_error(str(e)))
    # Not a bare 'except:' — SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        msg = 'get_version() failed with exception %s' % sys.exc_info()[0]
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
@app.route('/minions_status', methods=['GET'])
def get_minions():
    """Computes the response of the '/minions_status' endpoint.

    Returns:
      A dictionary from node names to the status of their minion collectors
      or an error message.
    """
    gs = app.context_graph_global_state
    minions_status = {}
    try:
        for node in kubernetes.get_nodes(gs):
            assert utilities.is_wrapped_object(node, 'Node')
            docker_host = node['id']
            minions_status[docker_host] = docker.get_minion_status(gs, docker_host)
    except collector_error.CollectorError as e:
        return flask.jsonify(utilities.make_error(str(e)))
    # Not a bare 'except:' — SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        msg = 'get_minions_status() failed with exception %s' % sys.exc_info()[0]
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
    return flask.jsonify(utilities.make_response(minions_status, 'minionsStatus'))
@app.route('/elapsed', methods=['GET'])
def get_elapsed():
    """Computes the response of the '/elapsed' endpoint.

    Returns:
      A successful response containing the list of elapsed time records of the
      most recent Kubernetes and Docker access operations since the previous
      call to the '/elapsed' endpoint. Never returns more than
      constants.MAX_ELAPSED_QUEUE_SIZE elapsed time records.
    """
    gs = app.context_graph_global_state
    try:
        result = return_elapsed(gs)
        return flask.jsonify(utilities.make_response(result, 'elapsed'))
    # Not a bare 'except:' — SystemExit/KeyboardInterrupt still propagate.
    except Exception:
        msg = 'get_elapsed() failed with exception %s' % sys.exc_info()[0]
        app.logger.exception(msg)
        return flask.jsonify(utilities.make_error(msg))
@app.route('/healthz', methods=['GET'])
def get_health():
  """Computes the response of the '/healthz' endpoint.

  Returns:
    A successful response containing the attribute 'health' and the value 'OK'.
  """
  health_response = utilities.make_response('OK', 'health')
  return flask.jsonify(health_response)
# Starts the web server and listens on all external IPs associated with this
# host.
def main():
  """Parses command-line flags, initializes the shared global state, and
  starts the data collector's web server on all external interfaces."""
  parser = argparse.ArgumentParser(description='Cluster-Insight data collector')
  parser.add_argument('-d', '--debug', action='store_true',
                      help='enable debug mode')
  parser.add_argument('-p', '--port', action='store', type=int,
                      default=constants.DATA_COLLECTOR_PORT,
                      help=('data collector port number [default=%d]' %
                            constants.DATA_COLLECTOR_PORT))
  parser.add_argument('--docker_port', action='store', type=int,
                      default=constants.DOCKER_PORT,
                      help=('Docker port number [default=%d]' %
                            constants.DOCKER_PORT))
  parser.add_argument('-w', '--workers', action='store', type=int,
                      default=0,
                      help=('number of concurrent workers. A zero or a '
                            'negative value denotes an automatic calculation '
                            'of this number. [default=0]'))
  args = parser.parse_args()
  # Configure logger verbosity before anything else logs.
  level = logging.DEBUG if args.debug else logging.INFO
  app.logger.setLevel(level)
  # Build the global state object that every endpoint reads through
  # app.context_graph_global_state.
  g_state = global_state.GlobalState()
  g_state.init_caches_and_synchronization()
  g_state.set_logger(app.logger)
  g_state.set_docker_port(args.docker_port)
  g_state.set_num_workers(args.workers)
  app.context_graph_global_state = g_state
  # '0.0.0.0' makes the server reachable on every external interface.
  app.run(host='0.0.0.0', port=args.port, debug=args.debug)


if __name__ == '__main__':
  main()
|
{
"content_hash": "03eebebe9eeffd2bfed01bdc03c6ceae",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 80,
"avg_line_length": 31.68470588235294,
"alnum_prop": 0.6845388385563642,
"repo_name": "EranGabber/cluster-insight",
"id": "9592d631b988197fa230b9eae7bab5f645066cdc",
"size": "14100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "collector/collector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3192"
},
{
"name": "Makefile",
"bytes": "1169"
},
{
"name": "Python",
"bytes": "191902"
},
{
"name": "Shell",
"bytes": "12645"
},
{
"name": "TeX",
"bytes": "206"
}
],
"symlink_target": ""
}
|
"""Utility for loading a snapshot of a CouchDB database from a multipart MIME
file.
"""
from __future__ import print_function
from base64 import b64encode
from optparse import OptionParser
import sys
from couchdb import __version__ as VERSION
from couchdb import json
from couchdb.client import Database
from couchdb.multipart import read_multipart
def load_db(fileobj, dburl, username=None, password=None, ignore_errors=False):
    """Load a multipart MIME dump from `fileobj` into the CouchDB at `dburl`.

    Each top-level MIME part is one document, identified by its Content-ID
    header.  A multipart part carries the document JSON plus attachments;
    a plain part carries only the JSON.  If `ignore_errors` is true, failures
    to store a document are printed and skipped instead of raised.
    """
    db = Database(dburl)
    if username is not None and password is not None:
        db.resource.credentials = (username, password)
    for headers, is_multipart, payload in read_multipart(fileobj):
        docid = headers['content-id']
        if is_multipart:  # doc has attachments
            # NOTE(review): this assumes the JSON sub-part (no content-id)
            # arrives before any attachment sub-part; otherwise `doc` is
            # referenced before assignment -- confirm read_multipart's order.
            # Also note '_rev' is only deleted in the no-attachment branch
            # below, not here -- verify that is intentional.
            for headers, _, payload in payload:
                if 'content-id' not in headers:
                    # The sub-part without a content-id is the document body.
                    doc = json.decode(payload)
                    doc['_attachments'] = {}
                else:
                    # Every other sub-part is an attachment; store it inline,
                    # base64-encoded per the CouchDB attachment format.
                    doc['_attachments'][headers['content-id']] = {
                        'data': b64encode(payload).decode('ascii'),
                        'content_type': headers['content-type'],
                        'length': len(payload)
                    }
        else:  # no attachments, just the JSON
            doc = json.decode(payload)
            # Drop the source revision so the target DB assigns a fresh one.
            del doc['_rev']
        print('Loading document %r' % docid, file=sys.stderr)
        try:
            db[docid] = doc
        except Exception as e:
            if not ignore_errors:
                raise
            print('Error: %s' % e, file=sys.stderr)
def main():
    """Command-line entry point: parse options and load the dump file."""
    parser = OptionParser(usage='%prog [options] dburl', version=VERSION)
    parser.add_option('--input', action='store', dest='input', metavar='FILE',
                      help='the name of the file to read from')
    parser.add_option('--ignore-errors', action='store_true',
                      dest='ignore_errors',
                      help='whether to ignore errors in document creation '
                           'and continue with the remaining documents')
    parser.add_option('--json-module', action='store', dest='json_module',
                      help='the JSON module to use ("simplejson", "cjson", '
                           'or "json" are supported)')
    parser.add_option('-u', '--username', action='store', dest='username',
                      help='the username to use for authentication')
    parser.add_option('-p', '--password', action='store', dest='password',
                      help='the password to use for authentication')
    parser.set_defaults(input='-')
    options, args = parser.parse_args()
    if len(args) != 1:
        return parser.error('incorrect number of arguments')
    # '-' (the default) means read the dump from standard input.
    fileobj = sys.stdin if options.input == '-' else open(options.input, 'rb')
    if options.json_module:
        json.use(options.json_module)
    load_db(fileobj, args[0], username=options.username,
            password=options.password, ignore_errors=options.ignore_errors)


if __name__ == '__main__':
    main()
|
{
"content_hash": "f3772d4bcf0d46720dcf5e1b585e5cd3",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 36.71084337349398,
"alnum_prop": 0.579586478503446,
"repo_name": "djc/couchdb-python",
"id": "0d5b7866e11fd92d29e9018357590a9c2a4d8e43",
"size": "3289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "couchdb/tools/load.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "85"
},
{
"name": "Makefile",
"bytes": "299"
},
{
"name": "Python",
"bytes": "233956"
}
],
"symlink_target": ""
}
|
import logging
from threading import local
from django.db import connection
from django.conf import settings
from django.utils._os import safe_join
from tenant_schemas.postgresql_backend.base import FakeTenant
logger = logging.getLogger(__name__)
class TenantProperties(local):
    """
    Thread-local store of per-tenant settings overrides.

    Overrides are loaded from MULTI_TENANT_DIR/<tenant.client_name>/settings.py.
    NOTE(review): the original docstring said ``properties.py`` while the code
    joins ``settings.py`` -- confirm which filename is intended.
    The file may contain arbitrary Python expressions and a ``settings`` name
    is available to it.  Attribute lookups on this object fall back to the
    regular Django settings.
    """
    # Per-thread mapping of names defined by the tenant's settings file.
    tenant_properties = {}
    def set_tenant(self, tenant):
        """Load the given tenant's settings file into ``tenant_properties``."""
        self.tenant = tenant
        self.tenant_properties = {}
        # Always default to standard django settings, e.g.
        # when tenant has no specific config, has no directory
        # or when no MULTI_TENANT_DIR is configured
        try:
            props_mod = safe_join(settings.MULTI_TENANT_DIR,
                                  tenant.client_name,
                                  "settings.py")
            # try to load tenant specific properties. We're using execfile since
            # tenant directories are not python packages (e.g. no __init__.py);
            # note execfile is Python-2-only.
            execfile(props_mod, dict(settings=settings),
                     self.tenant_properties)
        except (ImportError, AttributeError, IOError):
            # Missing/broken tenant config is expected; only log it for real
            # tenants, then silently fall back to plain Django settings.
            if not isinstance(tenant, FakeTenant):
                logger.debug('No tenant properties found for: {0}'.format(tenant.client_name))
            pass
    def __getattr__(self, k):
        """
        Search (in that specific order) tenant properties and settings.
        Raise AttributeError if not found.
        """
        try:
            return self.tenant_properties[k]
        except (AttributeError, KeyError):
            # May raise AttributeError which is the behaviour we expect
            return getattr(settings, k)
# Shared, thread-local instance through which the project reads tenant-aware
# settings.
properties = TenantProperties()
class TenantPropertiesMiddleware(object):
    """Middleware that loads tenant-specific configuration per request."""
    def process_request(self, request):
        """
        Determine the current tenant from the DB connection (if any) and load
        its tenant-specific configuration into the shared `properties` object.
        """
        # getattr with a default mirrors the original try/except AttributeError.
        current_tenant = getattr(connection, 'tenant', None)
        if current_tenant:
            properties.set_tenant(current_tenant)
|
{
"content_hash": "90b81cfd8447368c7eafe567db364f4b",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 96,
"avg_line_length": 31.845070422535212,
"alnum_prop": 0.6240601503759399,
"repo_name": "jfterpstra/bluebottle",
"id": "4db378d477030701aa97a9dc8ac1b6186c7acca4",
"size": "2261",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bluebottle/clients/middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16556"
},
{
"name": "HTML",
"bytes": "173443"
},
{
"name": "JavaScript",
"bytes": "434"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "1694079"
},
{
"name": "Shell",
"bytes": "2951"
},
{
"name": "Smarty",
"bytes": "4317"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class CustomdatasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="customdatasrc", parent_name="pointcloud", **kwargs):
super(CustomdatasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
{
"content_hash": "fda80e546382b695375aab5bb874710d",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 88,
"avg_line_length": 37.72727272727273,
"alnum_prop": 0.6385542168674698,
"repo_name": "plotly/plotly.py",
"id": "4db30466e19af7ca1ca5f0cf2ea9caaab0858585",
"size": "415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/pointcloud/_customdatasrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import numpy as np
from sklearn import datasets, cluster, metrics
from sklearn.model_selection import GridSearchCV
class UnsupervisedCV(object):
    """Degenerate cross-validation splitter with a single fold.

    Both the train and test sets of the single fold cover all samples,
    which is useful when tuning unsupervised estimators via GridSearchCV.
    """
    def __init__(self, n_samples):
        # Number of rows in the dataset to be "split".
        self.n_samples = n_samples
    def split(self, X=None, y=None, groups=None):
        """Return (train_indices, test_indices), each covering all samples."""
        indices = np.arange(self.n_samples)
        return indices, indices.copy()
    def get_n_splits(self, X=None, y=None, groups=None):
        """There is always exactly one fold."""
        return 1
    def __len__(self):
        return self.get_n_splits()
    def __iter__(self):
        yield self.split()
def make_clusterer_truth_scorer(metric):
    """Wrap a ground-truth clustering metric into a scorer callable.

    `metric` has signature metric(true_labels, predicted_labels); the
    returned scorer has signature scorer(estimator, X, y) and evaluates
    the fitted estimator's `labels_` against the ground truth `y`.

    Bug fix: the original returned the undefined name ``cluster_scorer``
    (a NameError at call time); it now returns the inner function that was
    actually defined.
    """
    def clusterer_scorer(estimator, X, y):
        return metric(y, estimator.labels_)
    return clusterer_scorer
class _ClusterScorer(object):
def __init__(self, score_func):
self._score_func = score_func
self.sign = 1
def __call__(self, estimator, X):
cluster_labels = estimator.labels_
return self.sign * self._score_func(X, cluster_labels)
def make_cluster_coherence_scorer(metric):
    """Wrap an unsupervised coherence metric ``metric(X, labels)`` into a
    scorer callable with signature ``scorer(estimator, X)``."""
    return _ClusterScorer(metric)
def auto_kmeans(X, n_clusters=(2, 3, 4), n_jobs=1):
    """Fit KMeans for several candidate K and return the best one.

    Fits a KMeans model for each candidate number of clusters in
    `n_clusters` and keeps the estimator with the highest silhouette
    score.  Candidates are tried sequentially; we take advantage of the
    parallelism inside the model (``n_jobs``) instead.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data to cluster.
    n_clusters : iterable of int, default (2, 3, 4)
        Candidate values of K.  (Changed from a mutable list default to a
        tuple so the default object cannot be mutated across calls; the
        values are unchanged.)
    n_jobs : int, default 1
        Parallel jobs used by each KMeans fit.

    Returns
    -------
    The fitted KMeans estimator with the best silhouette score, or None
    when `n_clusters` is empty.
    """
    best_score = -np.inf
    best_clusterer = None
    for cluster_k in n_clusters:
        cluster_estimator = cluster.KMeans(n_clusters=cluster_k, n_init=5, max_iter=10, n_jobs=n_jobs)
        labels = cluster_estimator.fit_predict(X)
        # NOTE(review): silhouette_score raises if the fit yields a single
        # cluster; presumably K >= 2 always avoids this -- confirm.
        score = metrics.silhouette_score(X, labels)
        if score > best_score:
            best_clusterer = cluster_estimator
            best_score = score
    return best_clusterer
|
{
"content_hash": "79da3e7db001b41e94c507f71e4741f6",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 102,
"avg_line_length": 28.029411764705884,
"alnum_prop": 0.6594963273871983,
"repo_name": "joshloyal/ClumPy",
"id": "a280bd259b3e5466581cabc16168f9a6d43a16b0",
"size": "1906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clumpy/cluster/auto_kmeans.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2097740"
},
{
"name": "Python",
"bytes": "100611"
}
],
"symlink_target": ""
}
|
from builtins import object
from pyemvtlv.types import tags
# Tag table: map each tag class's `_tagid` to the class itself, collected
# from every name in pyemvtlv.types.tags exposing a truthy `_tagid`.
td = {getattr(getattr(tags, n), '_tagid', None): getattr(tags, n)
      for n in dir(tags) if getattr(getattr(tags, n), '_tagid', None)}
# ASCII Field Separator (0x1c): terminates each TLV record in the substrate.
FS = '\x1c'


class Decoder(object):
    """Callable parser for FS-terminated ``<tag>:<length>:<value>`` records.

    ``taghash`` maps tag ids to tag classes.  A record whose tag starts with
    'D' decodes to None.  A value prefixed with 'a' is handed to the tag
    class as ASCII (``value=``); any other prefix is handed over as
    ``hexvalue=``.
    """

    def __init__(self, taghash):
        self._tags = taghash

    def __call__(self, substrate):
        """Decode one record; return (decoded_or_None, remaining_substrate)."""
        boundary = substrate.index(FS)
        record = substrate[:boundary]
        remainder = substrate[boundary + 1:]
        tag, length, value = record.split(':')
        if tag[0] == 'D':
            return None, remainder
        tagid = tag[1:]
        try:
            tag_cls = self._tags[tagid]
        except KeyError:
            raise ValueError('Cannot find tagId {}'.format(tagid))
        if value[0] == 'a':
            return (tag_cls(value=value[1:]), remainder)
        return (tag_cls(hexvalue=value[1:]), remainder)
# Module-level decoder singleton wired to the full tag table.
decode = Decoder(td)
|
{
"content_hash": "9ee5146c9ea767025d7707e97d82edb3",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 73,
"avg_line_length": 30.137931034482758,
"alnum_prop": 0.5732265446224256,
"repo_name": "mmattice/pyemvtlv",
"id": "b635d8c578102ebbcd25d1f9dd97d2295766c162",
"size": "2325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyemvtlv/codec/ingenico/decoder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "48337"
}
],
"symlink_target": ""
}
|
"""
To run these tests against a live database:
1. Modify the file `keystone/tests/unit/config_files/backend_sql.conf` to use
the connection for your live database.
2. Set up a blank, live database.
3. Run the tests using::
tox -e py27 -- keystone.tests.unit.test_sql_migrate_extensions
WARNING::
Your database will be wiped.
Do not do this against a Database with valuable data as
all data will be lost.
"""
from keystone.contrib import endpoint_filter
from keystone.contrib import federation
from keystone.contrib import oauth1
from keystone.contrib import revoke
from keystone import exception
from keystone.tests.unit import test_sql_upgrade
class SqlUpgradeOAuth1Extension(test_sql_upgrade.SqlMigrateBase):
    """Checks that the relocated oauth1 extension migrations refuse to run."""

    OAUTH1_MIGRATIONS = 5

    def repo_package(self):
        """Package holding this extension's migration repository."""
        return oauth1

    def test_upgrade(self):
        # Every legacy migration version (1..N) must raise
        # MigrationMovedFailure now that the migrations have moved in-tree.
        for version_number in range(1, self.OAUTH1_MIGRATIONS + 1):
            self.assertRaises(exception.MigrationMovedFailure,
                              self.upgrade, version=version_number,
                              repository=self.repo_path)
class EndpointFilterExtension(test_sql_upgrade.SqlMigrateBase):
    """Checks that relocated endpoint_filter migrations refuse to run."""

    ENDPOINT_FILTER_MIGRATIONS = 2

    def repo_package(self):
        """Package holding this extension's migration repository."""
        return endpoint_filter

    def test_upgrade(self):
        # Every legacy migration version (1..N) must raise
        # MigrationMovedFailure now that the migrations have moved in-tree.
        for version_number in range(1, self.ENDPOINT_FILTER_MIGRATIONS + 1):
            self.assertRaises(exception.MigrationMovedFailure,
                              self.upgrade, version=version_number,
                              repository=self.repo_path)
class FederationExtension(test_sql_upgrade.SqlMigrateBase):
    """Checks that relocated federation migrations refuse to run."""

    FEDERATION_MIGRATIONS = 8

    def repo_package(self):
        """Package holding this extension's migration repository."""
        return federation

    def test_upgrade(self):
        # Every legacy migration version (1..N) must raise
        # MigrationMovedFailure now that the migrations have moved in-tree.
        for version_number in range(1, self.FEDERATION_MIGRATIONS + 1):
            self.assertRaises(exception.MigrationMovedFailure,
                              self.upgrade, version=version_number,
                              repository=self.repo_path)
class RevokeExtension(test_sql_upgrade.SqlMigrateBase):
    """Checks that relocated revoke migrations refuse to run."""

    REVOKE_MIGRATIONS = 2

    def repo_package(self):
        """Package holding this extension's migration repository."""
        return revoke

    def test_upgrade(self):
        # Every legacy migration version (1..N) must raise
        # MigrationMovedFailure now that the migrations have moved in-tree.
        for version_number in range(1, self.REVOKE_MIGRATIONS + 1):
            self.assertRaises(exception.MigrationMovedFailure,
                              self.upgrade, version=version_number,
                              repository=self.repo_path)
|
{
"content_hash": "53fc4037a063c93cc841341e2cd23ff0",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 77,
"avg_line_length": 28.773809523809526,
"alnum_prop": 0.643773272652048,
"repo_name": "klmitch/keystone",
"id": "125b915055d269a222eed4e0a5d0acdecb13b8ec",
"size": "3002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/tests/unit/test_sql_migrate_extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "4405298"
}
],
"symlink_target": ""
}
|
import random
import sys
from logging import Logger, LoggerAdapter
from typing import Union
# ANSI escape sequence that resets all terminal text attributes.
RESET_CODE = '\x1b[m'


class LogDecorator(LoggerAdapter):
    """Decorates log messages with colors in console output.

    Each decorator instance picks a random 8-bit color and prefixes every
    message with a colored ``[key]`` tag (plain when stderr is not a tty).
    """

    def __init__(self, logger: Union[Logger, LoggerAdapter], key: str):
        super().__init__(logger, {})
        self.key = key
        self.color_code = self._random_color_code()

    def process(self, msg, kwargs):
        colored, plain = self._produce_prefix()
        # Only emit color codes when stderr is an interactive terminal.
        prefix = colored if sys.stderr.isatty() else plain
        return f'{prefix} {msg}', kwargs

    @staticmethod
    def _random_color_code():
        # Indices 16-231 of the 8-bit palette form the 6x6x6 color cube.
        # https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit
        palette_index = random.randint(16, 231)
        return f'\x1b[38;5;{palette_index}m'

    def _produce_prefix(self):
        """Return (colored_prefix, plain_prefix), chaining any parent prefix."""
        parent = super()
        if hasattr(parent, '_produce_prefix'):
            colored_base, plain_base = getattr(parent, '_produce_prefix')()
            colored = f'{colored_base} {self.color_code}[{self.key}]{RESET_CODE}'
            plain = f'{plain_base} [{self.key}]'
            return colored, plain
        return f'{self.color_code}[{self.key}]{RESET_CODE}', f'[{self.key}]'
|
{
"content_hash": "3bb9245dac27874ee7042be0b1bf2341",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 90,
"avg_line_length": 31.054054054054053,
"alnum_prop": 0.6614447345517842,
"repo_name": "firebase/firebase-android-sdk",
"id": "177f5a1a3ba0b71d0f2e9d2c07a06af8664e38ff",
"size": "1725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ci/fireci/fireciplugins/macrobenchmark/run/log_decorator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AIDL",
"bytes": "1486"
},
{
"name": "C",
"bytes": "6703"
},
{
"name": "C++",
"bytes": "86300"
},
{
"name": "Java",
"bytes": "12705758"
},
{
"name": "JavaScript",
"bytes": "5242"
},
{
"name": "Kotlin",
"bytes": "360846"
},
{
"name": "Makefile",
"bytes": "21550"
},
{
"name": "Mustache",
"bytes": "4739"
},
{
"name": "PureBasic",
"bytes": "10781995"
},
{
"name": "Python",
"bytes": "79496"
},
{
"name": "Ruby",
"bytes": "2545"
},
{
"name": "Shell",
"bytes": "4240"
}
],
"symlink_target": ""
}
|
import numpy as np
import scipy.stats as stats
import sys
# lib is our library, created for this assignment
import lib.naive_bayes as nb
import lib.preprocessing as prep
import lib.validation as valid
from config.constants import *
def case2(indexes=CASE_2_ATTRIBUTE_INDEXES,output=True):
    """Naive Bayes spam/ham classification using a Cauchy likelihood model.

    Runs NUMBER_OF_ROUNDS cross-validation rounds over the normalized
    dataset, estimates per-class location and scale parameters for each
    attribute index in `indexes`, classifies the test rows, and reports
    the mean / std-dev / variance of accuracy, precision and recall.

    NOTE(review): the reduce() calls below iterate over xrange(10) even
    though `indexes` is a parameter -- presumably len(indexes) == 10;
    confirm before calling with a different number of attributes.
    NOTE(review): by the usual definitions, the "precision" formulas below
    (correct / actual class count) compute recall and vice versa -- the
    metric names look swapped; verify before relying on the report.
    """
    # Per-round metric accumulators.
    accuracy_in_each_turn = list()
    precision_in_each_turn_spam = list()
    recall_in_each_turn_spam = list()
    precision_in_each_turn_ham = list()
    recall_in_each_turn_ham = list()
    m = np.loadtxt(open("resources/normalized_data.csv","rb"),delimiter=',')
    # Shuffle rows once; cross-validation slices are taken from this order.
    shuffled = np.random.permutation(m)
    valid.validate_cross_validation(NUMBER_OF_ROUNDS,TRAIN_TEST_RATIO)
    # equiprobable priors
    prior_spam = 0.5
    prior_ham = 0.5
    for i in xrange(NUMBER_OF_ROUNDS):
        # we're using cross-validation so each iteration we take a different
        # slice of the data to serve as test set
        train_set,test_set = prep.split_sets(shuffled,TRAIN_TEST_RATIO,i)
        #parameter estimation
        #but now we take 10 attributes into consideration
        sample_means_word_spam = list()
        sample_means_word_ham = list()
        sample_variances_word_spam = list()
        sample_variances_word_ham = list()
        for attr_index in indexes:
            sample_means_word_spam.append(nb.take_mean_spam(train_set,attr_index,SPAM_ATTR_INDEX))
            sample_means_word_ham.append(nb.take_mean_ham(train_set,attr_index,SPAM_ATTR_INDEX))
            sample_variances_word_spam.append(nb.take_variance_spam(train_set,attr_index,SPAM_ATTR_INDEX))
            sample_variances_word_ham.append(nb.take_variance_ham(train_set,attr_index,SPAM_ATTR_INDEX))
        #sample standard deviations from sample variances
        sample_std_devs_spam = map(lambda x: x ** (1/2.0), sample_variances_word_spam)
        sample_std_devs_ham = map(lambda x: x ** (1/2.0), sample_variances_word_ham)
        hits = 0.0
        misses = 0.0
        #number of instances correctly evaluated as spam
        correctly_is_spam = 0.0
        #total number of spam instances
        is_spam = 0.0
        #total number of instances evaluated as spam
        guessed_spam = 0.0
        #number of instances correctly evaluated as ham
        correctly_is_ham = 0.0
        #total number of ham instances
        is_ham = 0.0
        #total number of instances evaluated as ham
        guessed_ham = 0.0
        # now we test the hypothesis against the test set
        for row in test_set:
            # i.e. the product of all the words' conditional probabilities
            # given the class.  I know it looks confusing, but read it
            # carefully -- it is neat to do all of this in a single line! =)
            product_of_all_conditional_probs_spam = reduce(lambda acc,cur: acc * stats.cauchy(sample_means_word_spam[cur], sample_std_devs_spam[cur]).pdf(row[indexes[cur]]) , xrange(10), 1)
            # no need to divide by the normalization term, since we only want
            # to know which posterior is larger!
            posterior_spam = prior_spam * product_of_all_conditional_probs_spam
            product_of_all_conditional_probs_ham = reduce(lambda acc,cur: acc * stats.cauchy(sample_means_word_ham[cur], sample_std_devs_ham[cur]).pdf(row[indexes[cur]]) , xrange(10), 1)
            posterior_ham = prior_ham * product_of_all_conditional_probs_ham
            # whichever is greater - that will be our prediction
            if posterior_spam > posterior_ham:
                guess = 1
            else:
                guess = 0
            if(row[SPAM_ATTR_INDEX] == guess):
                hits += 1
            else:
                misses += 1
            # we'll use these to calculate metrics
            if (row[SPAM_ATTR_INDEX] == 1 ):
                is_spam += 1
                if guess == 1:
                    guessed_spam += 1
                    correctly_is_spam += 1
                else:
                    guessed_ham += 1
            else:
                is_ham += 1
                if guess == 1:
                    guessed_spam += 1
                else:
                    guessed_ham += 1
                    correctly_is_ham += 1
        #accuracy = number of correctly evaluated instances/
        # number of instances
        #
        #
        accuracy = hits/(hits+misses)
        #precision_spam = number of correctly evaluated instances as spam/
        # number of spam instances
        #
        #
        # in order to avoid divisions by zero in case nothing was found
        if(is_spam == 0):
            precision_spam = 0
        else:
            precision_spam = correctly_is_spam/is_spam
        #recall_spam = number of correctly evaluated instances as spam/
        # number of evaluated instances como spam
        #
        #
        # in order to avoid divisions by zero in case nothing was found
        if(guessed_spam == 0):
            recall_spam = 0
        else:
            recall_spam = correctly_is_spam/guessed_spam
        #precision_ham = number of correctly evaluated instances as ham/
        # number of ham instances
        #
        #
        # in order to avoid divisions by zero in case nothing was found
        if(is_ham == 0):
            precision_ham = 0
        else:
            precision_ham = correctly_is_ham/is_ham
        #recall_ham = number of correctly evaluated instances as ham/
        # number of evaluated instances como ham
        #
        #
        # in order to avoid divisions by zero in case nothing was found
        if(guessed_ham == 0):
            recall_ham = 0
        else:
            recall_ham = correctly_is_ham/guessed_ham
        accuracy_in_each_turn.append(accuracy)
        precision_in_each_turn_spam.append(precision_spam)
        recall_in_each_turn_spam.append(recall_spam)
        precision_in_each_turn_ham.append(precision_ham)
        recall_in_each_turn_ham.append(recall_ham)
    # calculation of means for each metric at the end
    mean_accuracy = np.mean(accuracy_in_each_turn)
    std_dev_accuracy = np.std(accuracy_in_each_turn)
    variance_accuracy = np.var(accuracy_in_each_turn)
    mean_precision_spam = np.mean(precision_in_each_turn_spam)
    std_dev_precision_spam = np.std(precision_in_each_turn_spam)
    variance_precision_spam = np.var(precision_in_each_turn_spam)
    mean_recall_spam = np.mean(recall_in_each_turn_spam)
    std_dev_recall_spam = np.std(recall_in_each_turn_spam)
    variance_recall_spam = np.var(recall_in_each_turn_spam)
    mean_precision_ham = np.mean(precision_in_each_turn_ham)
    std_dev_precision_ham = np.std(precision_in_each_turn_ham)
    variance_precision_ham = np.var(precision_in_each_turn_ham)
    mean_recall_ham = np.mean(recall_in_each_turn_ham)
    std_dev_recall_ham = np.std(recall_in_each_turn_ham)
    variance_recall_ham = np.var(recall_in_each_turn_ham)
    if output:
        # ANSI bright-green header, then reset to default colors.
        print "\033[1;32m"
        print '============================================='
        print 'CASE 2 - TEN ATTRIBUTES - USING CAUCHY MODEL'
        print '============================================='
        print "\033[00m"
        print 'MEAN ACCURACY: '+str(round(mean_accuracy,5))
        print 'STD. DEV. OF ACCURACY: '+str(round(std_dev_accuracy,5))
        print 'VARIANCE OF ACCURACY: '+str(round(variance_accuracy,8))
        print ''
        print 'MEAN PRECISION FOR SPAM: '+str(round(mean_precision_spam,5))
        print 'STD. DEV. OF PRECISION FOR SPAM: '+str(round(std_dev_precision_spam,5))
        print 'VARIANCE OF PRECISION FOR SPAM: '+str(round(variance_precision_spam,8))
        print ''
        print 'MEAN RECALL FOR SPAM: '+str(round(mean_recall_spam,5))
        print 'STD. DEV. OF RECALL FOR SPAM: '+str(round(std_dev_recall_spam,5))
        print 'VARIANCE OF RECALL FOR SPAM: '+str(round(variance_recall_spam,8))
        print ''
        print 'MEAN PRECISION FOR HAM: '+str(round(mean_precision_ham,5))
        print 'STD. DEV. OF PRECISION FOR HAM: '+str(round(std_dev_precision_ham,5))
        print 'VARIANCE OF PRECISION FOR HAM: '+str(round(variance_precision_ham,8))
        print ''
        print 'MEAN RECALL FOR HAM: '+str(round(mean_recall_ham,5))
        print 'STD. DEV. OF RECALL FOR HAM: '+str(round(std_dev_recall_ham,5))
        print 'VARIANCE OF RECALL FOR HAM: '+str(round(variance_recall_ham,8))
# Run case 2 when this module is executed/imported.
case2()
|
{
"content_hash": "f63800c146d8b7d33975a0418069b8ea",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 189,
"avg_line_length": 36.54978354978355,
"alnum_prop": 0.6008527774487741,
"repo_name": "queirozfcom/spam-filter",
"id": "a9409ae93e931598bca9cc75ed117745a8e1f952",
"size": "8443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "case2_naive_cauchy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "151648"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class UsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="usrc", parent_name="cone", **kwargs):
super(UsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
{
"content_hash": "a5ec7441a4682dd1dfe0e899c9504647",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 73,
"avg_line_length": 34.72727272727273,
"alnum_prop": 0.6073298429319371,
"repo_name": "plotly/plotly.py",
"id": "b3f75fb6fa292d7635dc473afac4e825651a17d2",
"size": "382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/cone/_usrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
import sys, os.path
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from dpark import DparkContext
from dpark.bagel import Vertex, Edge, Bagel
def parse_vertex(line, numV):
    """Parse one adjacency line ("title ref1 ref2 ...") into a
    (title, Vertex) pair with the initial rank 1/numV."""
    parts = line.split(' ')
    title = parts[0]
    refs = parts[1:]
    out_edges = [Edge(ref) for ref in refs]
    initial_rank = 1.0 / numV
    return (title, Vertex(title, initial_rank, out_edges, True))
def gen_compute(num, epsilon):
    """Build a Bagel compute callback implementing PageRank with damping 0.85.

    Vertices stop (become inactive) once the rank change drops below
    `epsilon` after superstep 10, or unconditionally after superstep 30.
    """
    def compute(self, messageSum, agg, superstep):
        if messageSum and messageSum[0]:
            new_rank = 0.15 / num + 0.85 * messageSum[0]
        else:
            new_rank = self.value
        converged = superstep >= 10 and abs(new_rank - self.value) < epsilon
        terminate = converged or superstep > 30
        if terminate:
            outbox = []
        else:
            # Division stays inside the comprehension so that a vertex with
            # no out-edges never divides by zero.
            outbox = [(edge.target_id, new_rank / len(self.outEdges))
                      for edge in self.outEdges]
        return Vertex(self.id, new_rank, self.outEdges, not terminate), outbox
    return compute
if __name__ == '__main__':
    # Input file: one page per line, "title ref1 ref2 ...".
    inputFile = 'wikipedia.txt'
    # Only print pages whose final rank exceeds this threshold.
    threshold = 0.01
    dpark = DparkContext()
    path = os.path.join(os.path.dirname(os.path.abspath(__file__)), inputFile)
    input = dpark.textFile(path)
    numVertex = input.count()
    vertices = input.map(lambda line: parse_vertex(line, numVertex)).cache()
    # Convergence tolerance scaled by graph size.
    epsilon = 0.01 / numVertex
    # PageRank starts with no messages in flight.
    messages = dpark.parallelize([])
    result = Bagel.run(dpark, vertices, messages,
                       gen_compute(numVertex, epsilon))
    for id, v in result.filter(lambda id_v: id_v[1].value > threshold).collect():
        print(id, v)
|
{
"content_hash": "71157fb2421ffe898879b03ae2bbfa05",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 96,
"avg_line_length": 34.659574468085104,
"alnum_prop": 0.6390423572744015,
"repo_name": "douban/dpark",
"id": "61348fa17f476081fcc301ccf6ec63a3726e9f18",
"size": "1651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/pagerank.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "12283"
},
{
"name": "CSS",
"bytes": "2638"
},
{
"name": "Dockerfile",
"bytes": "1378"
},
{
"name": "HTML",
"bytes": "9696"
},
{
"name": "JavaScript",
"bytes": "25347"
},
{
"name": "Python",
"bytes": "672082"
},
{
"name": "Shell",
"bytes": "1865"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..utils import int_or_none
class LiveLeakIE(InfoExtractor):
    """youtube-dl extractor for LiveLeak video pages
    (http://www.liveleak.com/view?i=<video_id>)."""
    _VALID_URL = r'^(?:http://)?(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P<video_id>[\w_]+)(?:.*)'
    # Fixtures consumed by youtube-dl's extractor test harness.
    _TESTS = [{
        'url': 'http://www.liveleak.com/view?i=757_1364311680',
        'md5': '0813c2430bea7a46bf13acf3406992f4',
        'info_dict': {
            'id': '757_1364311680',
            'ext': 'mp4',
            'description': 'extremely bad day for this guy..!',
            'uploader': 'ljfriel2',
            'title': 'Most unlucky car accident'
        }
    }, {
        'url': 'http://www.liveleak.com/view?i=f93_1390833151',
        'md5': 'd3f1367d14cc3c15bf24fbfbe04b9abf',
        'info_dict': {
            'id': 'f93_1390833151',
            'ext': 'mp4',
            'description': 'German Television Channel NDR does an exclusive interview with Edward Snowden.\r\nUploaded on LiveLeak cause German Television thinks the rest of the world isn\'t intereseted in Edward Snowden.',
            'uploader': 'ARD_Stinkt',
            'title': 'German Television does first Edward Snowden Interview (ENGLISH)',
        }
    }, {
        'url': 'http://www.liveleak.com/view?i=4f7_1392687779',
        'md5': '42c6d97d54f1db107958760788c5f48f',
        'info_dict': {
            'id': '4f7_1392687779',
            'ext': 'mp4',
            'description': "The guy with the cigarette seems amazingly nonchalant about the whole thing...  I really hope my friends' reactions would be a bit stronger.\r\n\r\nAction-go to 0:55.",
            'uploader': 'CapObveus',
            'title': 'Man is Fatally Struck by Reckless Car While Packing up a Moving Truck',
            'age_limit': 18,
        }
    }]
    def _real_extract(self, url):
        """Extract title, description, uploader, age limit and format list
        for the given LiveLeak URL."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('video_id')
        webpage = self._download_webpage(url, video_id)
        # Page titles carry a "LiveLeak.com -" prefix; strip it.
        video_title = self._og_search_title(webpage).replace('LiveLeak.com -', '').strip()
        video_description = self._og_search_description(webpage)
        video_uploader = self._html_search_regex(
            r'By:.*?(\w+)</a>', webpage, 'uploader', fatal=False)
        # Derived from the age-gate text, e.g. "...you are 18 years and over".
        age_limit = int_or_none(self._search_regex(
            r'you confirm that you are ([0-9]+) years and over.',
            webpage, 'age limit', default=None))
        sources_raw = self._search_regex(
            r'(?s)sources:\s*(\[.*?\]),', webpage, 'video URLs', default=None)
        if sources_raw is None:
            # Older pages embed a single 'file: "..."' entry instead of a
            # sources array; wrap it so the JSON fix-up below still applies.
            alt_source = self._search_regex(
                r'(file: ".*?"),', webpage, 'video URL', default=None)
            if alt_source:
                sources_raw = '[{ %s}]' % alt_source
            else:
                # Maybe an embed?
                embed_url = self._search_regex(
                    r'<iframe[^>]+src="(http://www.prochan.com/embed\?[^"]+)"',
                    webpage, 'embed URL')
                # Delegate extraction of the embedded player to its own
                # extractor while keeping the metadata found on this page.
                return {
                    '_type': 'url_transparent',
                    'url': embed_url,
                    'id': video_id,
                    'title': video_title,
                    'description': video_description,
                    'uploader': video_uploader,
                    'age_limit': age_limit,
                }
        # The sources block is JavaScript, not JSON: quote the bare keys so
        # json.loads() accepts it.
        sources_json = re.sub(r'\s([a-z]+):\s', r'"\1": ', sources_raw)
        sources = json.loads(sources_json)
        formats = [{
            'format_note': s.get('label'),
            'url': s['file'],
        } for s in sources]
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': video_title,
            'description': video_description,
            'uploader': video_uploader,
            'formats': formats,
            'age_limit': age_limit,
        }
|
{
"content_hash": "3751143355f7f70fde3e252f841ffe5e",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 223,
"avg_line_length": 40.145833333333336,
"alnum_prop": 0.5228334198235599,
"repo_name": "Celthi/youtube-dl-GUI",
"id": "b04be1e8cfda94addca26a1d1e3731ce61519dc1",
"size": "3854",
"binary": false,
"copies": "21",
"ref": "refs/heads/master",
"path": "youtube_dl/extractor/liveleak.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Inno Setup",
"bytes": "7102"
},
{
"name": "Python",
"bytes": "2064276"
}
],
"symlink_target": ""
}
|
from app import db
class Asset(db.Model):
    """A single stored file (asset), optionally belonging to a batch."""
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # File name; indexed for fast lookup.
    name = db.Column(db.String(64), index=True)
    # File size in bytes.  NOTE(review): the attribute name shadows the
    # builtin 'bytes' inside this class namespace -- harmless but confusing.
    bytes = db.Column(db.Integer)
    # NOTE(review): references table 'batch', while the Batches model's
    # default table name would normally derive from its class name --
    # confirm a matching __tablename__ exists elsewhere.
    batch_id = db.Column(db.Integer, db.ForeignKey('batch.id'))
    def __repr__(self):
        # Debug-friendly representation, e.g. <Asset 'file.tif'>.
        return '<Asset %r>' % (self.name)
class Batches(db.Model):
    """A named batch (group) of assets with its creation date."""
    # Surrogate primary key; referenced by Asset.batch_id.
    id = db.Column(db.Integer, primary_key=True)
    # Human-readable batch name.
    name = db.Column(db.String(64))
    # Date/time the batch was created or ingested.
    date = db.Column(db.DateTime)
    def __repr__(self):
        # Debug-friendly representation, e.g. <Batch 'batch-01'>.
        return '<Batch %r>' % (self.name)
|
{
"content_hash": "1ae1c96d92e37ebe75ef3596c8e3edcd",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 63,
"avg_line_length": 25.142857142857142,
"alnum_prop": 0.6041666666666666,
"repo_name": "jwestgard/elk",
"id": "055580b1950428e8c9061ea3fead76ca844cb2ad",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1051"
},
{
"name": "Python",
"bytes": "1802"
}
],
"symlink_target": ""
}
|
"""Functional tests for depthwise convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def ConfigsToTest():
  """Iterator for different convolution shapes, strides and paddings.

  Yields:
    Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
    convolution parameters.
  """
  # The five lists below are parallel: entry i of each one describes config i.
  input_sizes = [[4, 5, 5, 48], [4, 8, 8, 84], [4, 17, 17, 48], [4, 9, 27, 8],
                 [4, 31, 31, 7], [4, 35, 35, 2], [4, 147, 147, 2],
                 [3, 299, 299, 3], [5, 183, 183, 1]]
  filter_sizes = [[1, 1, 48, 2], [1, 3, 84, 1], [3, 1, 48, 4], [3, 3, 8, 1],
                  [3, 3, 7, 1], [5, 5, 2, 1], [3, 3, 2, 8], [2, 2, 3, 8],
                  [5, 5, 1, 2]]
  out_sizes = [[4, 5, 5, 96], [4, 8, 8, 84], [4, 17, 17, 192], [4, 9, 27, 8],
               [4, 31, 31, 7], [4, 35, 35, 2], [4, 49, 49, 16],
               [3, 150, 150, 24], [5, 92, 92, 2]]
  strides = [1, 1, 1, 1, 1, 1, 3, 2, 2]
  # pylint: disable=invalid-name
  VALID = "VALID"
  SAME = "SAME"
  # pylint: enable=invalid-name
  # Exactly one padding per config. (The original list carried a stray tenth
  # entry that zip() silently discarded; it has been removed so the list
  # lengths match the nine configs above.)
  paddings = [SAME, SAME, SAME, SAME, SAME, SAME, VALID, SAME, SAME]
  for i, f, o, s, p in zip(input_sizes, filter_sizes, out_sizes, strides,
                           paddings):
    yield i, f, o, s, p
def CheckGradConfigsToTest():
  """Iterator for different convolution shapes, strides and paddings.

  compute_gradient_error() is very expensive. So the configs should be
  relatively small.

  Yields:
    Tuple (input_size, filter_size, out_size, stride, padding), the depthwise
    convolution parameters.
  """
  # Each row is one complete configuration:
  # (input_size, filter_size, out_size, stride, padding).
  configs = [
      ([2, 5, 8, 1], [4, 4, 1, 2], [2, 5, 8, 2], 1, "SAME"),
      ([4, 5, 5, 1], [2, 2, 1, 2], [4, 2, 2, 2], 2, "VALID"),
      ([2, 4, 4, 2], [3, 1, 2, 2], [2, 4, 4, 4], 1, "SAME"),
      ([1, 15, 15, 2], [1, 3, 2, 1], [1, 15, 15, 2], 1, "SAME"),
      ([2, 15, 16, 1], [3, 3, 1, 2], [2, 5, 5, 2], 3, "VALID"),
  ]
  for config in configs:
    yield config
class DepthwiseConv2DTest(test.TestCase):
  """Forward-value, data-format, and gradient tests for depthwise conv ops."""
  # This is testing that depthwise_conv2d and depthwise_conv2d_native
  # produce the same results. It also tests that NCHW and NHWC
  # formats agree, by comparing the depthwise_conv2d_native with
  # 'NCHW' format (with transposition) matches the 'NHWC' format using
  # the higher level interface.
  def _VerifyValues(self,
                    tensor_in_sizes,
                    filter_in_sizes,
                    stride,
                    padding,
                    data_type,
                    use_gpu,
                    grouped_conv=False,
                    data_format="NHWC"):
    """Verifies the output values of the convolution function.
    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [filter_rows, filter_cols, input_depth, depth_multiplier].
      stride: Stride.
      padding: Padding type.
      data_type: The data type to use.
      use_gpu: Whether to use GPU.
      grouped_conv: Whether to use cuDNN 7's grouped convolution.
      data_format: The data_format of the input. "NHWC" or "NCHW".
    """
    input_size = 1
    filter_size = 1
    for s in tensor_in_sizes:
      input_size *= s
    for s in filter_in_sizes:
      filter_size *= s
    # Initializes the input and filter tensor with numbers incrementing from 1.
    x1 = [f * 1.0 / input_size for f in range(1, input_size + 1)]
    x2 = [f * 1.0 / filter_size for f in range(1, filter_size + 1)]
    ops.reset_default_graph()
    graph = ops.get_default_graph()
    with self.session(graph=graph, use_gpu=use_gpu) as sess:
      # Per-dtype comparison tolerance used by assertArrayNear below.
      tolerance = {
          dtypes.float16: 4e-2,
          dtypes.float32: 1e-5,
          dtypes.float64: 1e-12,
      }[data_type]
      t1 = constant_op.constant(x1, shape=tensor_in_sizes, dtype=data_type)
      t1.set_shape(tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes, dtype=data_type)
      native_t1 = t1
      strides = [1, stride, stride, 1]
      if data_format == "NCHW":
        # Transpose from NHWC input to NCHW
        # Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
        native_t1 = array_ops.transpose(t1, [0, 3, 1, 2])
        strides = [1, 1, stride, stride]
      # NOTE: _kernel_label_map is a private graph API used to force the
      # cuDNN grouped-convolution kernel when grouped_conv is requested.
      with sess.graph._kernel_label_map({
          "DepthwiseConv2dNative": "cudnn_grouped_convolution"
      } if grouped_conv else {}):
        conv_native = nn_ops.depthwise_conv2d_native(
            native_t1,
            t2,
            strides=strides,
            data_format=data_format,
            padding=padding)
      if data_format == "NCHW":
        # Transpose back from NCHW to NHWC
        conv_native = array_ops.transpose(conv_native, [0, 2, 3, 1])
      try:
        native_result = self.evaluate(conv_native)
      except errors.InvalidArgumentError as e:
        # Grouped convolution kernel is only registered for cuDNN 7. Silently
        # return when we are running on an earlier version or without GPU.
        if e.message.startswith(
            "No OpKernel was registered to support Op 'DepthwiseConv2dNative'"):
          tf_logging.warn("Skipping grouped convolution test")
          return
        raise e
      conv_interface = nn_impl.depthwise_conv2d(
          t1, t2, strides=[1, stride, stride, 1], padding=padding)
      interface_result = self.evaluate(conv_interface)
      tf_logging.info(
          "data_type: %r, use_gpu: %r, grouped_conv: %r, max diff = %f",
          data_type, use_gpu, grouped_conv,
          np.amax(np.absolute(native_result - interface_result)))
      # Native and high-level results must agree elementwise and in shape.
      self.assertArrayNear(
          np.ravel(native_result), np.ravel(interface_result), tolerance)
      self.assertShapeEqual(native_result, conv_native)
      self.assertShapeEqual(native_result, conv_interface)
  @test_util.run_v1_only("b/120545219")
  def testDepthwiseConv2D(self):
    # Exercises every standard config with and without grouped convolution.
    for index, (input_size, filter_size, _, stride,
                padding) in enumerate(ConfigsToTest()):
      tf_logging.info(
          "Testing DepthwiseConv2D, %dth config: %r * %r, stride: %d, padding: "
          "%s", index, input_size, filter_size, stride, padding)
      for data_type in [dtypes.float32, dtypes.float64]:
        tf_logging.info("Testing without grouped_conv")
        self._VerifyValues(
            input_size, filter_size, stride, padding, data_type, use_gpu=True)
        tf_logging.info("Testing with grouped_conv")
        self._VerifyValues(
            input_size,
            filter_size,
            stride,
            padding,
            data_type,
            use_gpu=True,
            grouped_conv=True)
  @test_util.run_v1_only("b/120545219")
  def testDepthwiseConv2DWithUnknownShape(self):
    # GitHub issue 22110.
    if not test.is_gpu_available():
      return
    with self.session(use_gpu=True):
      # Placeholder with fully unknown shape; the op must still build and run.
      x = array_ops.placeholder(dtypes.float32)
      f = np.ones([1, 1, 1, 1], np.float32)
      v = nn_impl.depthwise_conv2d(
          x, f, [1, 1, 1, 1], "VALID", rate=[2, 1], data_format="NCHW")
      self.assertAllEqual(
          np.ones([1, 1, 1, 1], np.float32),
          v.eval(feed_dict={x: np.ones([1, 1, 1, 1], np.float32)}))
  @test_util.run_v1_only("b/120545219")
  def testDepthwiseConv2DFormat(self):
    # Same as testDepthwiseConv2D but drives the NCHW data-format path.
    if not test.is_gpu_available():
      return
    for index, (input_size, filter_size, _, stride,
                padding) in enumerate(ConfigsToTest()):
      tf_logging.info(
          "Testing DepthwiseConv2DFormat, %dth config: %r * %r, stride: %d, "
          "padding: %s", index, input_size, filter_size, stride, padding)
      for data_type in [dtypes.float32, dtypes.float64]:
        self._VerifyValues(
            input_size,
            filter_size,
            stride,
            padding,
            data_type,
            use_gpu=True,
            data_format="NCHW")
  # This is testing against hand calculated results.
  def _VerifyHandValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                        expected, use_gpu):
    """Verifies the output values of the depthwise convolution function.
    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [filter_rows, filter_cols, input_depth, depth_multiplier].
      stride: Stride.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
      use_gpu: Whether to use GPU.
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
      total_size_1 *= s
    for s in filter_in_sizes:
      total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
    x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with self.cached_session(use_gpu=use_gpu) as sess:
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t1.set_shape(tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      conv = nn_ops.depthwise_conv2d_native(
          t1, t2, strides=[1, stride, stride, 1], padding=padding)
      value = self.evaluate(conv)
    tf_logging.info("value = %r", value)
    self.assertArrayNear(expected, np.ravel(value), 1e-5)
    self.assertShapeEqual(value, conv)
  def testConv2D2x2Filter(self):
    # The inputs look like this (it's a 3 x 2 matrix, each of depth 2):
    #
    # [ (1.0, 2.0), (3.0, 4.0), ( 5.0,  6.0) ]
    # [ (7.0, 8.0), (9.0, 10.0), (11.0, 12.0) ]
    #  We can view this as two inputs
    #
    #  input depth 0:
    #
    #  [ 1.0,  3.0,  5.0 ]
    #  [ 7.0,  9.0, 11.0 ]
    #
    #  input depth 1:
    #
    #  [ 2.0,  4.0,  6.0 ]
    #  [ 8.0, 10.0, 12.0 ]
    #
    # The filter looks like this (it has two 2 x 2 patches, each generating 2
    # depths):
    #
    #  filter #0:
    #
    #  [ (1.0,  3.0), ( 5.0,  7.0)]
    #  [ (9.0, 11.0), (13.0, 15.0)]
    #
    #  filter #1:
    #
    #  [ ( 2.0,  4.0), ( 6.0,  8.0)]
    #  [ (10.0, 12.0), (14.0, 16.0)]
    #
    # So the outputs are:
    #
    #  (position 0, 0: in_depth 0, output_depth 0 -- using filter #0)
    #  1.0 * 1.0 + 7.0 * 9.0 + 3.0 * 5.0 + 9.0 * 13.0 = 196
    #  (position 0, 0: in_depth 0, output_depth 1 -- using filter #1)
    #  1.0 * 2.0 + 7.0 * 10.0 + 3.0 * 6.0 + 9.0 * 14.0 = 216
    #  (position 0, 0: in_depth 1, output_depth 2 -- using filter #0)
    #  2.0 * 3.0 + 8.0 * 11.0 + 4.0 * 7.0 + 10.0 * 15.0 = 272
    #  (position 0, 0: in_depth 1, output_depth 3 -- using filter #1)
    #  2.0 * 4.0 + 8.0 * 12.0 + 4.0 * 8.0 + 10.0 * 16.0 = 296
    #
    #  (position 1, 0: in_depth 0, output_depth 0 -- using filter #0)
    #  3.0 * 1.0 + 9.0 * 9.0 + 5.0 * 5.0 + 11.0 * 13.0 = 252
    #  (position 1, 0: in_depth 0, output_depth 1 -- using filter #1)
    #  3.0 * 2.0 + 9.0 * 10.0 + 5.0 * 6.0 + 11.0 * 14.0 = 280
    #  (position 1, 0: in_depth 1, output_depth 2 -- using filter #0)
    #  4.0 * 3.0 + 10.0 * 11.0 + 6.0 * 7.0 + 12.0 * 15.0 = 344
    #  (position 1, 0: in_depth 1, output_depth 3 -- using filter #1)
    #  4.0 * 4.0 + 10.0 * 12.0 + 6.0 * 8.0 + 12.0 * 16.0 = 376
    expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
    self._VerifyHandValues(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[2, 2, 2, 2],
        stride=1,
        padding="VALID",
        expected=expected_output,
        use_gpu=False)
    self._VerifyHandValues(
        tensor_in_sizes=[1, 2, 3, 2],
        filter_in_sizes=[2, 2, 2, 2],
        stride=1,
        padding="VALID",
        expected=expected_output,
        use_gpu=True)
  # Gradient checkers. This tests depthwise gradient computations for both
  # BackpropFilter and BackpropInput by comparing gradients computed by the
  # depthwise gradient ops with the gradients computed numerically (details can
  # be found in the compute_gradient_error().
  # Note this check is very expensive so the input should not be too big.
  def _ConstructAndTestGradient(self,
                                input_shape,
                                filter_shape,
                                output_shape,
                                stride,
                                padding,
                                data_type,
                                test_input,
                                use_gpu,
                                grouped_conv=False,
                                data_format="NHWC"):
    # Builds a depthwise conv and checks the analytic gradient of either the
    # input (test_input=True) or the filter (test_input=False) against a
    # numeric estimate from gradient_checker.
    input_size = 1
    for x in input_shape:
      input_size *= x
    filter_size = 1
    for x in filter_shape:
      filter_size *= x
    input_data = [x * 1.0 / input_size for x in range(0, input_size)]
    filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
    ops.reset_default_graph()
    graph = ops.get_default_graph()
    with self.session(graph=graph, use_gpu=use_gpu) as sess:
      # Per-dtype tolerance for the max gradient error.
      tolerance = {
          dtypes.float16: 4e-0,
          dtypes.float32: 8e-4,
          dtypes.float64: 1e-12,
      }[data_type]
      input_tensor = constant_op.constant(
          input_data, shape=input_shape, dtype=data_type, name="input")
      filter_tensor = constant_op.constant(
          filter_data, shape=filter_shape, dtype=data_type, name="filter")
      native_input = input_tensor
      strides = [1, stride, stride, 1]
      if data_format == "NCHW":
        # Transpose from NHWC input to NCHW
        # Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
        native_input = array_ops.transpose(input_tensor, [0, 3, 1, 2])
        input_shape = [
            input_shape[0], input_shape[3], input_shape[1], input_shape[2]
        ]
        output_shape = [
            output_shape[0], output_shape[3], output_shape[1], output_shape[2]
        ]
        strides = [1, 1, stride, stride]
      # NOTE: private graph API to force cuDNN grouped-convolution kernels
      # for the forward op and both backprop ops.
      with sess.graph._kernel_label_map({
          "DepthwiseConv2dNative": "cudnn_grouped_convolution",
          "DepthwiseConv2dNativeBackpropInput": "cudnn_grouped_convolution",
          "DepthwiseConv2dNativeBackpropFilter": "cudnn_grouped_convolution",
      } if grouped_conv else {}):
        depthwise_conv2d = nn_ops.depthwise_conv2d_native(
            native_input,
            filter_tensor,
            strides,
            padding,
            data_format=data_format,
            name="depthwise_conv2d")
      self.assertEqual(output_shape, depthwise_conv2d.get_shape())
      try:
        if test_input:
          err = gradient_checker.compute_gradient_error(
              native_input, input_shape, depthwise_conv2d, output_shape)
        else:
          err = gradient_checker.compute_gradient_error(
              filter_tensor, filter_shape, depthwise_conv2d, output_shape)
      except errors.InvalidArgumentError as e:
        # Grouped convolution kernel is only registered for cuDNN 7. Silently
        # return when we are running on an earlier version or without GPU.
        if grouped_conv and e.message.startswith(
            "No OpKernel was registered to support Op 'DepthwiseConv2dNative'"):
          tf_logging.warn("Skipping grouped convolution test")
          return
        raise e
      tf_logging.info(
          "data_type: %r, use_gpu: %r, grouped_conv: %r, error = %f", data_type,
          use_gpu, grouped_conv, err)
      self.assertLess(err, tolerance)
  @test_util.run_v1_only("b/120545219")
  def testDepthwiseConv2DInputGrad(self):
    # Checks d(conv)/d(input) for each small gradient config.
    for index, (input_size, filter_size, output_size, stride,
                padding) in enumerate(CheckGradConfigsToTest()):
      tf_logging.info(
          "Testing DepthwiseConv2DInputGrad, %dth config: %r * %r, stride: %d, "
          "padding: %s", index, input_size, filter_size, stride, padding)
      for data_type in [dtypes.float32, dtypes.float64]:
        self._ConstructAndTestGradient(
            input_size,
            filter_size,
            output_size,
            stride,
            padding,
            data_type,
            test_input=True,
            use_gpu=True)
        self._ConstructAndTestGradient(
            input_size,
            filter_size,
            output_size,
            stride,
            padding,
            data_type,
            test_input=True,
            use_gpu=True,
            grouped_conv=True)
  @test_util.run_v1_only("b/120545219")
  def testDepthwiseConv2DInputGradFormat(self):
    # Input-gradient check via the NCHW path (GPU only).
    if not test.is_gpu_available():
      return
    for index, (input_size, filter_size, output_size, stride,
                padding) in enumerate(CheckGradConfigsToTest()):
      tf_logging.info(
          "Testing DepthwiseConv2DInputGradFormat, %dth config: %r * %r, "
          "stride: %d, padding: %s", index, input_size, filter_size, stride,
          padding)
      for data_type in [dtypes.float32, dtypes.float64]:
        self._ConstructAndTestGradient(
            input_size,
            filter_size,
            output_size,
            stride,
            padding,
            data_type,
            test_input=True,
            use_gpu=True,
            data_format="NCHW")
  @test_util.run_v1_only("b/120545219")
  def testDepthwiseConv2DFilterGrad(self):
    # Checks d(conv)/d(filter) for each small gradient config.
    for index, (input_size, filter_size, output_size, stride,
                padding) in enumerate(CheckGradConfigsToTest()):
      tf_logging.info(
          "Testing DepthwiseConv2DFilterGrad, %dth config: %r * %r, stride: "
          "%d, padding: %s", index, input_size, filter_size, stride, padding)
      for data_type in [dtypes.float32, dtypes.float64]:
        self._ConstructAndTestGradient(
            input_size,
            filter_size,
            output_size,
            stride,
            padding,
            data_type,
            test_input=False,
            use_gpu=True)
  @test_util.run_v1_only("b/120545219")
  def testDepthwiseConv2DFilterGradFormat(self):
    # Filter-gradient check via the NCHW path (GPU only).
    if not test.is_gpu_available():
      return
    for index, (input_size, filter_size, output_size, stride,
                padding) in enumerate(CheckGradConfigsToTest()):
      tf_logging.info(
          "Testing DepthwiseConv2DFilterGradFormat, %dth config: %r * %r, "
          "stride: %d, padding: %s", index, input_size, filter_size, stride,
          padding)
      for data_type in [dtypes.float32, dtypes.float64]:
        self._ConstructAndTestGradient(
            input_size,
            filter_size,
            output_size,
            stride,
            padding,
            data_type,
            test_input=False,
            use_gpu=True,
            data_format="NCHW")
  def _CompareBackpropInputFloat(self, input_sizes, filter_sizes, output_sizes,
                                 stride, padding):
    # Compares float32 BackpropInput results between GPU and CPU kernels.
    x1 = np.random.rand(*filter_sizes).astype(np.float32)
    x2 = np.random.rand(*output_sizes).astype(np.float32)
    def _GetVal(use_gpu):
      with self.cached_session(use_gpu=use_gpu):
        t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
        t1 = constant_op.constant(x1, shape=filter_sizes)
        t2 = constant_op.constant(x2, shape=output_sizes)
        backprop = nn_ops.depthwise_conv2d_native_backprop_input(
            t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        ret = self.evaluate(backprop)
        self.assertShapeEqual(ret, backprop)
      return ret
    gpu_value = _GetVal(use_gpu=True)
    cpu_value = _GetVal(use_gpu=False)
    self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
  def _CompareBackpropInputDouble(self, input_sizes, filter_sizes, output_sizes,
                                  stride, padding):
    # Compares float64 BackpropInput results between GPU and CPU kernels.
    x1 = np.random.rand(*filter_sizes).astype(np.float64)
    x2 = np.random.rand(*output_sizes).astype(np.float64)
    def _GetVal(use_gpu):
      with self.cached_session(use_gpu=use_gpu):
        t0 = constant_op.constant(input_sizes, shape=[len(input_sizes)])
        t1 = constant_op.constant(x1, shape=filter_sizes)
        t2 = constant_op.constant(x2, shape=output_sizes)
        backprop = nn_ops.depthwise_conv2d_native_backprop_input(
            t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        ret = self.evaluate(backprop)
        self.assertShapeEqual(ret, backprop)
      return ret
    gpu_value = _GetVal(use_gpu=True)
    cpu_value = _GetVal(use_gpu=False)
    self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
  def testDepthwiseConv2DInputGradCompare(self):
    for index, (input_size, filter_size, output_size, stride,
                padding) in enumerate(ConfigsToTest()):
      tf_logging.info(
          "Testing DepthwiseConv2DInputGradCompare, %dth config: %r * %r, "
          "stride: %d, padding: %s", index, input_size, filter_size, stride,
          padding)
      self._CompareBackpropInputFloat(input_size, filter_size, output_size,
                                      stride, padding)
      self._CompareBackpropInputDouble(input_size, filter_size, output_size,
                                       stride, padding)
  def _CompareBackpropFilterFloat(self, input_sizes, filter_sizes, output_sizes,
                                  stride, padding):
    # Compares float32 BackpropFilter results between GPU and CPU kernels.
    x0 = np.random.rand(*input_sizes).astype(np.float32)
    x2 = np.random.rand(*output_sizes).astype(np.float32)
    def _GetVal(use_gpu):
      with self.cached_session(use_gpu=use_gpu):
        t0 = constant_op.constant(x0, shape=input_sizes)
        t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = constant_op.constant(x2, shape=output_sizes)
        backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
            t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        ret = self.evaluate(backprop)
        self.assertShapeEqual(ret, backprop)
      return ret
    gpu_value = _GetVal(use_gpu=True)
    cpu_value = _GetVal(use_gpu=False)
    self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
  def _CompareBackpropFilterDouble(self, input_sizes, filter_sizes,
                                   output_sizes, stride, padding):
    # Compares float64 BackpropFilter results between GPU and CPU kernels.
    x0 = np.random.rand(*input_sizes).astype(np.float64)
    x2 = np.random.rand(*output_sizes).astype(np.float64)
    def _GetVal(use_gpu):
      with self.cached_session(use_gpu=use_gpu):
        t0 = constant_op.constant(x0, shape=input_sizes)
        t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
        t2 = constant_op.constant(x2, shape=output_sizes)
        backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
            t0, t1, t2, strides=[1, stride, stride, 1], padding=padding)
        ret = self.evaluate(backprop)
        self.assertShapeEqual(ret, backprop)
      return ret
    gpu_value = _GetVal(use_gpu=True)
    cpu_value = _GetVal(use_gpu=False)
    self.assertAllClose(cpu_value, gpu_value, rtol=1e-4, atol=1e-4)
  def testDepthwiseConv2DFilterGradCompare(self):
    for index, (input_size, filter_size, output_size, stride,
                padding) in enumerate(ConfigsToTest()):
      tf_logging.info(
          "Testing DepthwiseConv2DFilterGradCompare, %dth config: %r * %r, "
          "stride: %d, padding: %s", index, input_size, filter_size, stride,
          padding)
      self._CompareBackpropFilterFloat(input_size, filter_size, output_size,
                                       stride, padding)
      self._CompareBackpropFilterDouble(input_size, filter_size, output_size,
                                        stride, padding)
# Run all depthwise convolution tests when executed as a script.
if __name__ == "__main__":
  test.main()
|
{
"content_hash": "528d96ac57e060b5a449da578ddcd47d",
"timestamp": "",
"source": "github",
"line_count": 619,
"max_line_length": 80,
"avg_line_length": 39.25686591276252,
"alnum_prop": 0.5837037037037037,
"repo_name": "alsrgv/tensorflow",
"id": "0717b058f47f6d63a330459511a236e8a5479528",
"size": "24989",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/depthwise_conv_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "755360"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "68001148"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1627121"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "842866"
},
{
"name": "Jupyter Notebook",
"bytes": "1665584"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101157"
},
{
"name": "Objective-C",
"bytes": "104061"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17570"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48843099"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "488241"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
}
|
"""Tests for recurrent layers functionality other than GRU, LSTM, SimpleRNN.
See also: lstm_test.py, gru_test.py, simplernn_test.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import test
from tensorflow.python.training.checkpointable import util as checkpointable_util
class RNNTest(test.TestCase):
def test_minimal_rnn_cell_non_layer(self):
class MinimalRNNCell(object):
def __init__(self, units, input_dim):
self.units = units
self.state_size = units
self.kernel = keras.backend.variable(
np.random.random((input_dim, units)))
def call(self, inputs, states):
prev_output = states[0]
output = keras.backend.dot(inputs, self.kernel) + prev_output
return output, [output]
with self.test_session():
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = keras.layers.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8, 5),
MinimalRNNCell(32, 8),
MinimalRNNCell(32, 32)]
layer = keras.layers.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
  def test_minimal_rnn_cell_non_layer_multiple_states(self):
    """A non-Layer cell may declare a tuple state_size (multiple states)."""
    class MinimalRNNCell(object):
      # Two-state cell: output combines both previous states; next states
      # are (output * 2, output * 3).
      def __init__(self, units, input_dim):
        self.units = units
        self.state_size = (units, units)
        self.kernel = keras.backend.variable(
            np.random.random((input_dim, units)))
      def call(self, inputs, states):
        prev_output_1 = states[0]
        prev_output_2 = states[1]
        output = keras.backend.dot(inputs, self.kernel)
        output += prev_output_1
        output -= prev_output_2
        return output, [output * 2, output * 3]
    with self.test_session():
      # Basic test case.
      cell = MinimalRNNCell(32, 5)
      x = keras.Input((None, 5))
      layer = keras.layers.RNN(cell)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
      # Test stacking.
      cells = [MinimalRNNCell(8, 5),
               MinimalRNNCell(16, 8),
               MinimalRNNCell(32, 16)]
      layer = keras.layers.RNN(cells)
      # Stacked state sizes are flattened with the last cell's states first
      # (each cell contributes its (units, units) pair).
      assert layer.cell.state_size == (32, 32, 16, 16, 8, 8)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
  def test_minimal_rnn_cell_layer(self):
    """A Layer-subclass cell trains, serializes, and round-trips weights."""
    class MinimalRNNCell(keras.layers.Layer):
      def __init__(self, units, **kwargs):
        self.units = units
        self.state_size = units
        super(MinimalRNNCell, self).__init__(**kwargs)
      def build(self, input_shape):
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                      initializer='uniform',
                                      name='kernel')
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer='uniform',
            name='recurrent_kernel')
        self.built = True
      def call(self, inputs, states):
        prev_output = states[0]
        h = keras.backend.dot(inputs, self.kernel)
        output = h + keras.backend.dot(prev_output, self.recurrent_kernel)
        return output, [output]
      def get_config(self):
        # Required for from_config round-trips below.
        config = {'units': self.units}
        base_config = super(MinimalRNNCell, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    with self.test_session():
      # Test basic case.
      x = keras.Input((None, 5))
      cell = MinimalRNNCell(32)
      layer = keras.layers.RNN(cell)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
      # Test basic case serialization: rebuild the layer from its config,
      # reload the trained weights, and check predictions are unchanged.
      x_np = np.random.random((6, 5, 5))
      y_np = model.predict(x_np)
      weights = model.get_weights()
      config = layer.get_config()
      with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
        layer = keras.layers.RNN.from_config(config)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.set_weights(weights)
      y_np_2 = model.predict(x_np)
      self.assertAllClose(y_np, y_np_2, atol=1e-4)
      # Test stacking.
      cells = [MinimalRNNCell(8),
               MinimalRNNCell(12),
               MinimalRNNCell(32)]
      layer = keras.layers.RNN(cells)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
      # Test stacked RNN serialization.
      x_np = np.random.random((6, 5, 5))
      y_np = model.predict(x_np)
      weights = model.get_weights()
      config = layer.get_config()
      with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
        layer = keras.layers.RNN.from_config(config)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.set_weights(weights)
      y_np_2 = model.predict(x_np)
      self.assertAllClose(y_np, y_np_2, atol=1e-4)
  def test_rnn_cell_with_constants_layer(self):
    """RNN layer forwards `constants` to a cell whose call() accepts them.

    NOTE: `model`, `config`, `weights` and `custom_objects` carry over
    between the successive test_session blocks below — the ordering of the
    blocks matters.
    """
    class RNNCellWithConstants(keras.layers.Layer):
      def __init__(self, units, **kwargs):
        self.units = units
        self.state_size = units
        super(RNNCellWithConstants, self).__init__(**kwargs)
      def build(self, input_shape):
        # With constants, build() receives [input_shape, constant_shape].
        if not isinstance(input_shape, list):
          raise TypeError('expects constants shape')
        [input_shape, constant_shape] = input_shape
        # will (and should) raise if more than one constant passed
        self.input_kernel = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer='uniform',
            name='kernel')
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer='uniform',
            name='recurrent_kernel')
        self.constant_kernel = self.add_weight(
            shape=(constant_shape[-1], self.units),
            initializer='uniform',
            name='constant_kernel')
        self.built = True
      def call(self, inputs, states, constants):
        [prev_output] = states
        [constant] = constants
        h_input = keras.backend.dot(inputs, self.input_kernel)
        h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
        h_const = keras.backend.dot(constant, self.constant_kernel)
        output = h_input + h_state + h_const
        return output, [output]
      def get_config(self):
        config = {'units': self.units}
        base_config = super(RNNCellWithConstants, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    with self.test_session():
      # Test basic case.
      x = keras.Input((None, 5))
      c = keras.Input((3,))
      cell = RNNCellWithConstants(32)
      layer = keras.layers.RNN(cell)
      y = layer(x, constants=c)
      model = keras.models.Model([x, c], y)
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(
          [np.zeros((6, 5, 5)), np.zeros((6, 3))],
          np.zeros((6, 32))
      )
    with self.test_session():
      # Test basic case serialization.
      x_np = np.random.random((6, 5, 5))
      c_np = np.random.random((6, 3))
      y_np = model.predict([x_np, c_np])
      weights = model.get_weights()
      config = layer.get_config()
      custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
      with keras.utils.CustomObjectScope(custom_objects):
        layer = keras.layers.RNN.from_config(config.copy())
      y = layer(x, constants=c)
      model = keras.models.Model([x, c], y)
      model.set_weights(weights)
      y_np_2 = model.predict([x_np, c_np])
      self.assertAllClose(y_np, y_np_2, atol=1e-4)
    with self.test_session():
      # test flat list inputs.
      with keras.utils.CustomObjectScope(custom_objects):
        layer = keras.layers.RNN.from_config(config.copy())
      y = layer([x, c])
      model = keras.models.Model([x, c], y)
      model.set_weights(weights)
      y_np_3 = model.predict([x_np, c_np])
      self.assertAllClose(y_np, y_np_3, atol=1e-4)
    with self.test_session():
      # Test stacking.
      cells = [keras.layers.recurrent.GRUCell(8),
               RNNCellWithConstants(12),
               RNNCellWithConstants(32)]
      layer = keras.layers.recurrent.RNN(cells)
      y = layer(x, constants=c)
      model = keras.models.Model([x, c], y)
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(
          [np.zeros((6, 5, 5)), np.zeros((6, 3))],
          np.zeros((6, 32))
      )
    with self.test_session():
      # Test GRUCell reset_after property.
      x = keras.Input((None, 5))
      c = keras.Input((3,))
      cells = [keras.layers.recurrent.GRUCell(32, reset_after=True)]
      layer = keras.layers.recurrent.RNN(cells)
      y = layer(x, constants=c)
      model = keras.models.Model([x, c], y)
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(
          [np.zeros((6, 5, 5)), np.zeros((6, 3))],
          np.zeros((6, 32))
      )
    with self.test_session():
      # Test stacked RNN serialization
      x_np = np.random.random((6, 5, 5))
      c_np = np.random.random((6, 3))
      y_np = model.predict([x_np, c_np])
      weights = model.get_weights()
      config = layer.get_config()
      with keras.utils.CustomObjectScope(custom_objects):
        layer = keras.layers.recurrent.RNN.from_config(config.copy())
      y = layer(x, constants=c)
      model = keras.models.Model([x, c], y)
      model.set_weights(weights)
      y_np_2 = model.predict([x_np, c_np])
      self.assertAllClose(y_np, y_np_2, atol=1e-4)
  def test_rnn_cell_with_constants_layer_passing_initial_state(self):
    """`constants` and `initial_state` can be passed to the RNN together.

    NOTE: `model`, `config`, `weights` and `custom_objects` carry over
    between the successive test_session blocks below.
    """
    class RNNCellWithConstants(keras.layers.Layer):
      def __init__(self, units, **kwargs):
        self.units = units
        self.state_size = units
        super(RNNCellWithConstants, self).__init__(**kwargs)
      def build(self, input_shape):
        # With constants, build() receives [input_shape, constant_shape].
        if not isinstance(input_shape, list):
          raise TypeError('expects constants shape')
        [input_shape, constant_shape] = input_shape
        # will (and should) raise if more than one constant passed
        self.input_kernel = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer='uniform',
            name='kernel')
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer='uniform',
            name='recurrent_kernel')
        self.constant_kernel = self.add_weight(
            shape=(constant_shape[-1], self.units),
            initializer='uniform',
            name='constant_kernel')
        self.built = True
      def call(self, inputs, states, constants):
        [prev_output] = states
        [constant] = constants
        h_input = keras.backend.dot(inputs, self.input_kernel)
        h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
        h_const = keras.backend.dot(constant, self.constant_kernel)
        output = h_input + h_state + h_const
        return output, [output]
      def get_config(self):
        config = {'units': self.units}
        base_config = super(RNNCellWithConstants, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    with self.test_session():
      # Test basic case.
      x = keras.Input((None, 5))
      c = keras.Input((3,))
      s = keras.Input((32,))
      cell = RNNCellWithConstants(32)
      layer = keras.layers.RNN(cell)
      y = layer(x, initial_state=s, constants=c)
      model = keras.models.Model([x, s, c], y)
      model.compile(optimizer='rmsprop', loss='mse')
      model.train_on_batch(
          [np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))],
          np.zeros((6, 32))
      )
    with self.test_session():
      # Test basic case serialization.
      x_np = np.random.random((6, 5, 5))
      s_np = np.random.random((6, 32))
      c_np = np.random.random((6, 3))
      y_np = model.predict([x_np, s_np, c_np])
      weights = model.get_weights()
      config = layer.get_config()
      custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
      with keras.utils.CustomObjectScope(custom_objects):
        layer = keras.layers.RNN.from_config(config.copy())
      y = layer(x, initial_state=s, constants=c)
      model = keras.models.Model([x, s, c], y)
      model.set_weights(weights)
      y_np_2 = model.predict([x_np, s_np, c_np])
      self.assertAllClose(y_np, y_np_2, atol=1e-4)
      # verify that state is used
      y_np_2_different_s = model.predict([x_np, s_np + 10., c_np])
      with self.assertRaises(AssertionError):
        self.assertAllClose(y_np, y_np_2_different_s, atol=1e-4)
    with self.test_session():
      # test flat list inputs
      with keras.utils.CustomObjectScope(custom_objects):
        layer = keras.layers.RNN.from_config(config.copy())
      y = layer([x, s, c])
      model = keras.models.Model([x, s, c], y)
      model.set_weights(weights)
      y_np_3 = model.predict([x_np, s_np, c_np])
      self.assertAllClose(y_np, y_np_3, atol=1e-4)
def test_stacked_rnn_attributes(self):
  """Checks weight/loss/update bookkeeping for an RNN over stacked cells."""
  cells = [keras.layers.LSTMCell(1),
           keras.layers.LSTMCell(1)]
  layer = keras.layers.RNN(cells)
  layer.build((None, None, 1))

  # Test weights: each LSTMCell contributes kernel, recurrent kernel, bias.
  self.assertEqual(len(layer.trainable_weights), 6)
  cells[0].trainable = False
  # Freezing one cell moves its 3 weights to the non-trainable bucket.
  self.assertEqual(len(layer.trainable_weights), 3)
  self.assertEqual(len(layer.non_trainable_weights), 3)

  # Test `get_losses_for` and `losses`
  x = keras.Input((None, 1))
  loss_1 = math_ops.reduce_sum(x)
  loss_2 = math_ops.reduce_sum(cells[0].kernel)
  cells[0].add_loss(loss_1, inputs=x)
  cells[0].add_loss(loss_2)
  self.assertEqual(len(layer.losses), 2)
  # Unconditional loss (loss_2) vs input-conditional loss (loss_1).
  self.assertEqual(layer.get_losses_for(None), [loss_2])
  self.assertEqual(layer.get_losses_for(x), [loss_1])

  # Test `get_updates_for` and `updates` on a fresh layer.
  cells = [keras.layers.LSTMCell(1),
           keras.layers.LSTMCell(1)]
  layer = keras.layers.RNN(cells)
  layer.build((None, None, 1))
  x = keras.Input((None, 1))
  update_1 = state_ops.assign_add(cells[0].kernel,
                                  x[0, 0, 0] * cells[0].kernel)
  update_2 = state_ops.assign_add(cells[0].kernel,
                                  array_ops.ones_like(cells[0].kernel))
  cells[0].add_update(update_1, inputs=x)
  cells[0].add_update(update_2)
  self.assertEqual(len(layer.updates), 2)
  # One input-conditional update and one unconditional update.
  self.assertEqual(len(layer.get_updates_for(None)), 1)
  self.assertEqual(len(layer.get_updates_for(x)), 1)
def test_rnn_dynamic_trainability(self):
  """Toggling `layer.trainable` moves all weights between the two buckets."""
  layer_class = keras.layers.SimpleRNN
  embedding_dim = 4
  units = 3
  layer = layer_class(units)
  layer.build((None, None, embedding_dim))
  # SimpleRNN owns kernel, recurrent kernel and bias: 3 weights total.
  self.assertEqual(len(layer.weights), 3)
  self.assertEqual(len(layer.trainable_weights), 3)
  self.assertEqual(len(layer.non_trainable_weights), 0)
  layer.trainable = False
  self.assertEqual(len(layer.weights), 3)
  self.assertEqual(len(layer.trainable_weights), 0)
  self.assertEqual(len(layer.non_trainable_weights), 3)
  layer.trainable = True
  self.assertEqual(len(layer.weights), 3)
  self.assertEqual(len(layer.trainable_weights), 3)
  self.assertEqual(len(layer.non_trainable_weights), 0)
def test_state_reuse_with_dropout(self):
  """Output state of a dropout RNN can seed another RNN's initial state."""
  layer_class = keras.layers.SimpleRNN
  embedding_dim = 4
  units = 3
  timesteps = 2
  num_samples = 2
  with self.test_session():
    input1 = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
    layer = layer_class(units,
                        return_state=True,
                        return_sequences=True,
                        dropout=0.2)
    # Element 0 is the output sequence; the remainder are the states.
    state = layer(input1)[1:]
    input2 = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
    output = layer_class(units)(input2, initial_state=state)
    model = keras.Model([input1, input2], output)
    inputs = [np.random.random((num_samples, timesteps, embedding_dim)),
              np.random.random((num_samples, timesteps, embedding_dim))]
    # Smoke test: predict must run without error.
    model.predict(inputs)
def test_builtin_rnn_cell_serialization(self):
  """Round-trips single and stacked built-in cells via get_config/from_config."""
  for cell_class in [keras.layers.SimpleRNNCell,
                     keras.layers.GRUCell,
                     keras.layers.LSTMCell]:
    with self.test_session():
      # Test basic case.
      x = keras.Input((None, 5))
      cell = cell_class(32)
      layer = keras.layers.RNN(cell)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.compile(optimizer='rmsprop', loss='mse')

      # Test basic case serialization.
      x_np = np.random.random((6, 5, 5))
      y_np = model.predict(x_np)
      weights = model.get_weights()
      config = layer.get_config()
      layer = keras.layers.RNN.from_config(config)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.set_weights(weights)
      y_np_2 = model.predict(x_np)
      # The rebuilt layer with original weights must reproduce the output.
      self.assertAllClose(y_np, y_np_2, atol=1e-4)

      # Test stacking.
      cells = [cell_class(8),
               cell_class(12),
               cell_class(32)]
      layer = keras.layers.RNN(cells)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.compile(optimizer='rmsprop', loss='mse')

      # Test stacked RNN serialization.
      x_np = np.random.random((6, 5, 5))
      y_np = model.predict(x_np)
      weights = model.get_weights()
      config = layer.get_config()
      layer = keras.layers.RNN.from_config(config)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.set_weights(weights)
      y_np_2 = model.predict(x_np)
      self.assertAllClose(y_np, y_np_2, atol=1e-4)
def test_stacked_rnn_dropout(self):
  """Stacked LSTM cells with dropout can run one training step."""
  cells = [keras.layers.LSTMCell(3, dropout=0.1, recurrent_dropout=0.1),
           keras.layers.LSTMCell(3, dropout=0.1, recurrent_dropout=0.1)]
  layer = keras.layers.RNN(cells)
  with self.test_session():
    x = keras.Input((None, 5))
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile('sgd', 'mse')
    x_np = np.random.random((6, 5, 5))
    y_np = np.random.random((6, 3))
    # Smoke test: a training step must run without error.
    model.train_on_batch(x_np, y_np)
def test_stacked_rnn_compute_output_shape(self):
  """compute_output_shape lists the sequence output then each cell's states."""
  cells = [keras.layers.LSTMCell(3),
           keras.layers.LSTMCell(6)]
  embedding_dim = 4
  timesteps = 2
  layer = keras.layers.RNN(cells, return_state=True, return_sequences=True)
  output_shape = layer.compute_output_shape((None, timesteps, embedding_dim))
  # Sequence output comes from the last (6-unit) cell, followed by the h/c
  # state pair of the 6-unit cell and then of the 3-unit cell.
  expected_output_shape = [(None, timesteps, 6),
                           (None, 6),
                           (None, 6),
                           (None, 3),
                           (None, 3)]
  self.assertEqual(
      [tuple(o.as_list()) for o in output_shape],
      expected_output_shape)
def test_checkpointable_dependencies(self):
  """All model variables must appear in the checkpointable object graph."""
  rnn = keras.layers.SimpleRNN
  with self.test_session():
    x = np.random.random((2, 2, 2))
    y = np.random.random((2, 2))
    model = keras.models.Sequential()
    model.add(rnn(2))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(x, y, epochs=1, batch_size=1)
    # check whether the model variables are present in the
    # checkpointable list of objects
    checkpointed_objects = set(checkpointable_util.list_objects(model))
    for v in model.variables:
      self.assertIn(v, checkpointed_objects)
if __name__ == '__main__':
  # Run the test suite when this file is executed directly.
  test.main()
|
{
"content_hash": "72bd548c929bd41ef60056ac9a9a3792",
"timestamp": "",
"source": "github",
"line_count": 564,
"max_line_length": 81,
"avg_line_length": 36.273049645390074,
"alnum_prop": 0.5977612669860202,
"repo_name": "jart/tensorflow",
"id": "fefb92826b33b65a14ba667207995b6e4194c202",
"size": "21147",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/layers/recurrent_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "310149"
},
{
"name": "C++",
"bytes": "44871792"
},
{
"name": "CMake",
"bytes": "206735"
},
{
"name": "Go",
"bytes": "1163781"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "799574"
},
{
"name": "Jupyter Notebook",
"bytes": "2455980"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52050"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99265"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "38792793"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "447966"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
}
|
"""
Application main for the mixmind app
"""
import os
import random
import datetime
import tempfile
import urllib.request, urllib.parse, urllib.error
import codecs
import pendulum
from functools import wraps
from flask import g, render_template, flash, request, send_file, jsonify, redirect, url_for, after_this_request
from flask_security import login_required, roles_required, roles_accepted
from flask_security.decorators import _get_unauthorized_view
from flask_login import current_user
from .notifier import send_mail
from .forms import DrinksForm, OrderForm, OrderFormAnon, RecipeForm, RecipeListSelector, BarstockForm, UploadBarstockForm, LoginForm, CreateBarForm, EditBarForm, EditUserForm, SetBarOwnerForm
from .authorization import user_datastore
from .barstock import Barstock_SQL, Ingredient, _update_computed_fields
from .formatted_menu import filename_from_options, generate_recipes_pdf
from .compose_html import recipe_as_html, users_as_table, orders_as_table, bars_as_table
from .util import filter_recipes, DisplayOptions, FilterOptions, PdfOptions, load_recipe_json, report_stats, convert_units
from .database import db
from .models import User, Order, Bar
from . import app, mms, current_bar
from .logger import get_logger
log = get_logger(__name__)
"""
BUGS:
NOTES:
* admin pages
- raise 404 on not authorized
- add/remove recipes as raw json
- ace embeddable text editor
- menu_generator
* better commits to db with after_this_request
* menu schemas
- would be able to include definitive item lists for serving, ice, tag, etc.
* hardening
- test error handling
"""
@app.before_request
def initialize_shared_data():
    """Stash the active bar's id on flask.g before every request."""
    g.bar_id = current_bar.id
def get_form(form_class):
    """Instantiate a WTForm, tolerating an empty request body.

    WTForms 2.2 breaks when an empty request.form is given to it, so fall
    back to a default-constructed form when no POST data is present.
    """
    if not request.form:
        return form_class()
    return form_class(request.form)
def bundle_options(tuple_class, args):
    """Build a `tuple_class` instance by pulling each field's `.data` off *args*.

    Each field name of the namedtuple is looked up as a form-field attribute
    on *args*, and its `.data` value becomes the tuple entry.
    """
    values = [getattr(args, name).data for name in tuple_class._fields]
    return tuple_class(*values)
def recipes_from_options(form, display_opts=None, filter_opts=None, to_html=False, order_link=False, convert_to=None, **kwargs_for_html):
    """Apply display formatting, filtering, and sorting to the currently
    loaded recipes of the active bar.

    :param form: submitted DrinksForm; used to derive options whenever the
        explicit option tuples are not supplied
    :param display_opts: optional DisplayOptions overriding the form's
    :param filter_opts: optional FilterOptions overriding the form's
    :param to_html: render each recipe to an html snippet
    :param order_link: when rendering html, attach an /order/<name> link
    :param convert_to: unit to convert each recipe into (mutates recipes)
    :param kwargs_for_html: extra keyword args forwarded to recipe_as_html
    :returns: (recipes, excluded, stats) - stats is html or None
    """
    display_options = bundle_options(DisplayOptions, form) if not display_opts else display_opts
    filter_options = bundle_options(FilterOptions, form) if not filter_opts else filter_opts
    # Free-text search unions matches instead of intersecting all filters.
    recipes, excluded = filter_recipes(mms.processed_recipes(current_bar), filter_options, union_results=bool(filter_options.search))
    if form.sorting.data and form.sorting.data != 'None':  # TODO this is weird
        # A trailing 'X' on the sort key requests descending order.
        reverse = 'X' in form.sorting.data
        attr = 'avg_{}'.format(form.sorting.data.rstrip('X'))
        recipes = sorted(recipes, key=lambda r: getattr(r.stats, attr), reverse=reverse)
    if convert_to:
        # In-place unit conversion of every recipe.
        [r.convert(convert_to) for r in recipes]
    if display_options.stats and recipes:
        stats = report_stats(recipes, as_html=True)
    else:
        stats = None
    # TODO this can certainly be cached
    if to_html:
        if order_link:
            recipes = [recipe_as_html(recipe, display_options,
                       order_link="/order/{}".format(urllib.parse.quote_plus(recipe.name)),
                       **kwargs_for_html) for recipe in recipes]
        else:
            recipes = [recipe_as_html(recipe, display_options, **kwargs_for_html) for recipe in recipes]
    return recipes, excluded, stats
def get_tmp_file():
    """Get a temporary file that will be removed by a callback after
    the current request.

    :returns: file name as a string
    """
    _, tmp_filename = tempfile.mkstemp()
    @after_this_request
    def rm_tempfile(response):
        # Best-effort cleanup; never fail the response over a missing file.
        try:
            os.remove(tmp_filename)
        except OSError as e:
            log.warning("OSError: Failed to rm tmp file {}: {}".format(tmp_filename, e))
        return response
    return tmp_filename
################################################################################
# Customer routes
################################################################################
@app.route("/", methods=['GET', 'POST'])
def browse():
form = get_form(DrinksForm)
filter_options = None
if request.method == 'GET':
# filter for current recipes that can be made on the core list
filter_options = FilterOptions(search="",all_=False,include="",exclude="",include_use_or=False,exclude_use_or=False,style="",glass="",prep="",ice="",tag="core")
display_opts = DisplayOptions(
prices=current_bar.prices,
stats=False,
examples=current_bar.examples,
all_ingredients=False,
markup=current_bar.markup,
prep_line=current_bar.prep_line,
origin=current_bar.origin,
info=current_bar.info,
variants=current_bar.variants)
recipes, _, _ = recipes_from_options(form, display_opts=display_opts, filter_opts=filter_options,
to_html=True, order_link=True, convert_to=current_bar.convert, condense_ingredients=current_bar.summarize)
if request.method == 'POST':
if form.validate():
n_results = len(recipes)
if n_results > 0:
if 'surprise-menu' in request.form:
recipes = [random.choice(recipes)]
flash("Bartender's choice! Just try again if you want something else!")
else:
flash("Filters applied. Showing {} available recipes".format(n_results), 'success')
else:
flash("No results after filtering, try being less specific", 'warning')
else:
flash("Error in form validation", 'danger')
return render_template('browse.html', form=form, recipes=recipes)
@app.route("/order/<recipe_name>", methods=['GET', 'POST'])
def order(recipe_name):
if current_user.is_authenticated:
form = get_form(OrderForm)
else:
form = get_form(OrderFormAnon)
recipe_name = urllib.parse.unquote_plus(recipe_name)
show_form = False
heading = "Order:"
recipe = mms.find_recipe(current_bar, recipe_name)
recipe.convert('oz')
if not recipe:
flash('Error: unknown recipe "{}"'.format(recipe_name), 'danger')
return render_template('result.html', heading=heading)
else:
recipe_html = recipe_as_html(recipe, DisplayOptions(
prices=current_bar.prices,
stats=False,
examples=True,
all_ingredients=False,
markup=current_bar.markup,
prep_line=True,
origin=current_bar.origin,
info=True,
variants=True), convert_to=current_bar.convert)
if not recipe.can_make:
flash('Ingredients to make this are out of stock :(', 'warning')
return render_template('order.html', form=form, recipe=recipe_html, show_form=False)
if request.method == 'GET':
show_form = True
if current_user.is_authenticated:
heading = "Order for {}:".format(current_user.get_name(short=True))
if current_bar.is_closed:
flash("It's closed. So sad.", 'warning')
if request.method == 'POST':
if 'submit-order' in request.form:
if form.validate():
if current_user.is_authenticated:
user_name = current_user.get_name()
user_email = current_user.email
else:
user_name = form.name.data
user_email = form.email.data
if current_bar.is_closed:
flash('The bar has been closed for orders.', 'warning')
return redirect(request.url)
# use simpler html for recording an order
email_recipe_html = recipe_as_html(recipe, DisplayOptions(
prices=current_bar.prices,
stats=False,
examples=True,
all_ingredients=False,
markup=current_bar.markup,
prep_line=True,
origin=current_bar.origin,
info=True,
variants=True), fancy=False, convert_to=current_bar.convert)
# add to the order database
order = Order(bar_id=current_bar.id, bartender_id=current_bar.bartender.id,
timestamp=datetime.datetime.utcnow(), user_email=user_email,
recipe_name=recipe.name, recipe_html=email_recipe_html)
if current_user.is_authenticated:
order.user_id = current_user.id
db.session.add(order)
db.session.commit()
subject = "{} for {} at {}".format(recipe.name, user_name, current_bar.name)
confirmation_link = "https://{}{}".format(request.host,
url_for('confirm_order',
order_id=order.id))
send_mail(subject, current_bar.bartender.email, "order_submitted",
confirmation_link=confirmation_link,
name=user_name,
notes=form.notes.data,
recipe_html=email_recipe_html)
flash("Your order has been submitted, and you'll receive a confirmation email once the bartender acknowledges it", 'success')
if not current_user.is_authenticated:
if User.query.filter_by(email=user_email).one_or_none():
flash("Hey, if you log in you won't have to keep typing your email address for orders ;)", 'secondary')
return redirect(url_for('security.login'))
else:
flash("Hey, if you register I'll remember your name and email in future orders!", 'secondary')
return redirect(url_for('security.register'))
return render_template('result.html', heading="Order Placed")
else:
flash("Error in form validation", 'danger')
# either provide the recipe and the form,
# or after the post show the result
return render_template('order.html', form=form, recipe=recipe_html, heading=heading, show_form=show_form)
@app.route('/confirm_order')
def confirm_order():
    """Bartender's confirmation link: mark the order confirmed and email the
    customer (including a Venmo payment link when the bartender has one).
    """
    # TODO this needs a security token
    order_id = request.args.get('order_id')
    order = Order.query.filter_by(id=order_id).one_or_none()
    if not order:
        flash("Error: Invalid order_id", 'danger')
        return render_template("result.html", heading="Invalid confirmation link")
    if order.confirmed:
        flash("Error: Order has already been confirmed", 'danger')
        return render_template("result.html", heading="Invalid confirmation link")
    bartender = user_datastore.find_user(id=order.bartender_id)
    if bartender and bartender.venmo_id:
        venmo_link = app.config.get('VENMO_LINK','').format(bartender.venmo_id)
    else:
        venmo_link = None
    order.confirmed = datetime.datetime.utcnow()
    # update users db
    user = User.query.filter_by(email=order.user_email).one_or_none()
    if user:
        greeting = "{}, you".format(user.get_name(short=True))
        if order.user_id and order.user_id != user.id:
            flash("Order was created with different id than confirming user!", 'danger')
            return render_template('result.html', heading="Invalid request")
        user.orders.append(order)
        user_datastore.put(user)
        user_datastore.commit()
    else:
        greeting = "You"
        # BUGFIX: persist the confirmation timestamp even when no registered
        # user matches the order's email (previously only the `if user`
        # branch ever committed, so anonymous confirmations were lost).
        db.session.commit()
    bar = Bar.query.filter_by(id=order.bar_id).one_or_none()
    if bar is None:
        flash("Invalid bar id with order", 'danger')
        return render_template('result.html', heading="Invalid request")
    subject = "[Mix-Mind] Your {} Confirmation".format(current_bar.name)
    sent = send_mail(subject, order.user_email, "order_confirmation",
            greeting=greeting,
            recipe_name=order.recipe_name,
            recipe_html=order.recipe_html,
            venmo_link=venmo_link)
    if sent:
        flash('Confirmation sent')
    else:
        # BUGFIX: corrected user-facing typo "Confimration".
        flash('Confirmation email failed', 'danger')
    return render_template('result.html', heading="{} for {}".format(order.recipe_name, user.get_name(short=True) if user else order.user_email),
            body=order.recipe_html)
@app.route('/user', methods=['GET', 'POST'])
@login_required
def user_profile():
    """View/edit a user profile.

    Admins may view any user; other users may only view themselves. POST
    updates the profile fields from the EditUserForm.
    """
    try:
        user_id = int(request.args.get('user_id'))
    except ValueError:
        flash("Invalid user_id parameter", 'danger')
        return render_template('result.html', heading="User profile unavailable")
    if current_user.id != user_id and not current_user.has_role('admin'):
        return _get_unauthorized_view()
    # leaving this trigger here because it's convenient
    # Self-service admin bootstrap for addresses listed in MAKE_ADMIN config.
    if current_user.email in app.config.get('MAKE_ADMIN', []):
        if not current_user.has_role('admin'):
            admin = user_datastore.find_role('admin')
            user_datastore.add_role_to_user(current_user, admin)
            user_datastore.commit()
            flash("You have been upgraded to admin", 'success')
    this_user = user_datastore.find_user(id=user_id)
    if not this_user:
        flash("Unknown user_id", 'danger')
        return render_template('result.html', heading="User profile unavailable")
    form = get_form(EditUserForm)
    if request.method == 'POST':
        if form.validate():
            this_user.first_name = form.first_name.data
            this_user.last_name = form.last_name.data
            this_user.nickname = form.nickname.data
            this_user.venmo_id = form.venmo_id.data
            user_datastore.commit()
            flash("Profile updated", 'success')
            return redirect(request.url)
        else:
            flash("Error in form validation", 'danger')
            # Re-render without pre-populating so the user's input is kept.
            return render_template('user_profile.html', this_user=this_user, edit_user=form,
                    human_timestamp=mms.time_human_formatter, human_timediff=mms.time_diff_formatter,
                    timestamp=mms.timestamp_formatter)
    # TODO make admins able to edit user page
    # pre-populate the form with the current values
    for attr in 'first_name,last_name,nickname,venmo_id'.split(','):
        setattr(getattr(form, attr), 'data', getattr(this_user, attr))
    return render_template('user_profile.html', this_user=this_user, edit_user=form,
            human_timestamp=mms.time_human_formatter, human_timediff=mms.time_diff_formatter,
            timestamp=mms.timestamp_formatter)
@app.route("/user_post_login", methods=['GET'])
@login_required
def post_login_redirect():
# assign any orders with this user's email to the actual user ID
# these could be from before they registered or ordered while logged out
orders = Order.query.filter_by(user_email=current_user.email).all()
for order in orders:
if not order.user_id:
order.user_id = current_user.id
log.info("Attributing order {} to user {}".format(order.id, current_user.id))
db.session.commit()
return redirect(url_for('browse'))
@app.route('/user_post_confirm_email')
@login_required
def user_confirmation_hook():
    """After email confirmation, grant the 'customer' role, then continue
    through the normal post-login flow."""
    if not current_user.has_role('customer'):
        customer = user_datastore.find_role('customer')
        user_datastore.add_role_to_user(current_user, customer)
        user_datastore.commit()
    return redirect(url_for('post_login_redirect'))
################################################################################
# Owner routes
################################################################################
def check_ownership(f):
    """View decorator: ensure current_user owns the current bar, or is admin.

    Renders a "Not Authorized" page instead of calling the view otherwise.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Admins bypass the ownership check entirely.
        if not current_user.has_role('admin') and current_user != current_bar.owner:
            flash("You do not have permission to view this resource. This could have been an attempt to switch to a different bar when editing a bar of which you had control.", 'danger')
            return render_template('result.html', heading="Not Authorized")
        return f(*args, **kwargs)
    return decorated_function
@app.route("/manage/bar", methods=['GET', 'POST'])
@login_required
@roles_accepted('admin', 'owner')
@check_ownership
def bar_settings():
BAR_BULK_ATTRS = 'name,tagline,is_public,prices,prep_line,examples,convert,markup,info,origin,variants,summarize'.split(',')
edit_bar_form = get_form(EditBarForm)
if request.method == 'POST':
# TODO invalid to have open without a bartender (js?)
if edit_bar_form.validate():
bar_id = current_bar.id
bar = Bar.query.filter_by(id=bar_id).one_or_none()
if bar is None:
flash("Invalid bar_id: {}".format(bar_id), 'danger')
return redirect(request.url)
# unassign previous bartender
if bar.bartender_on_duty:
old_bartender = user_datastore.find_user(id=bar.bartender_on_duty)
send_mail("[Mix-Mind] Bartender Duty Unassigned", old_bartender.email, 'simple',
heading="No longer bartending at {}".format(bar.name),
message="You have been unassigned as the bartender-on-duty at {}.".format(bar.name))
# add bartender on duty
user = user_datastore.find_user(email=edit_bar_form.bartender.data)
if user and user.id != bar.bartender_on_duty:
bartender = user_datastore.find_role('bartender')
user_datastore.add_role_to_user(user, bartender)
bar.bartenders.append(user)
bar.bartender_on_duty = user.id
send_mail("[Mix-Mind] Bartender Duty Assigned", user.email, 'simple',
heading="Bartending at {}".format(bar.name),
message="You have been assigned as the bartender-on-duty at {}.".format(bar.name))
else:
# closed/no bartender is same result
if not user or edit_bar_form.status.data == False:
bar.bartender_on_duty = None
for attr in BAR_BULK_ATTRS:
setattr(bar, attr, getattr(edit_bar_form, attr).data)
db.session.commit()
flash("Successfully updated config for {}".format(bar.cname))
return redirect(request.url)
else:
flash("Error in form validation", 'warning')
# for GET requests, fill in the edit bar form
edit_bar_form.status.data = not current_bar.is_closed
edit_bar_form.bartender.data = '' if current_bar.is_closed else current_bar.bartender.email
for attr in BAR_BULK_ATTRS:
setattr(getattr(edit_bar_form, attr), 'data', getattr(current_bar, attr))
if edit_bar_form is None:
return redirect(request.url)
orders = Order.query.filter_by(bar_id=current_bar.id)
order_table = orders_as_table(orders)
return render_template('bar_settings.html', edit_bar_form=edit_bar_form, order_table=order_table)
@app.route("/manage/ingredients", methods=['GET','POST'])
@login_required
@roles_accepted('admin', 'owner')
@check_ownership
def ingredient_stock():
form = get_form(BarstockForm)
upload_form = get_form(UploadBarstockForm)
form_open = False
log.debug("Form errors: {}".format(form.errors))
if request.method == 'POST':
log.debug(request)
if 'add-ingredient' in request.form:
if form.validate():
row = {}
row['Category'] = form.category.data
row['Type'] = form.type_.data
row['Kind'] = form.kind.data
row['ABV'] = float(form.abv.data)
row['Size (mL)'] = convert_units(float(form.size.data), form.unit.data, 'mL')
row['Price Paid'] = float(form.price.data)
try:
ingredient = Barstock_SQL(current_bar.id).add_row(row, current_bar.id)
except NameError as e:
flash('Error: {}'.format(e), 'danger')
else:
mms.regenerate_recipes(current_bar, ingredient=ingredient.type_)
return redirect(request.url)
else:
form_open = True
flash("Error in form validation", 'danger')
elif 'upload-csv' in request.form:
# TODO handle files < 500 kb by keeping in mem
csv_file = request.files['upload_csv']
if not csv_file or csv_file.filename == '':
flash('No selected file', 'danger')
return redirect(request.url)
tmp_filename = get_tmp_file()
csv_file.save(tmp_filename)
Barstock_SQL(current_bar.id).load_from_csv([tmp_filename], current_bar.id,
replace_existing=upload_form.replace_existing.data)
mms.generate_recipes(current_bar)
msg = "Ingredients database {} {} for {}".format(
"replaced by" if upload_form.replace_existing.data else "added to from",
csv_file.filename, current_bar.cname)
log.info(msg)
flash(msg, 'success')
return render_template('ingredients.html', form=form, upload_form=upload_form, form_open=form_open)
################################################################################
# Admin routes
################################################################################
@app.route("/admin/set_bar_owner", methods=['POST'])
@login_required
@roles_required('admin')
def set_bar_owner():
set_owner_form = get_form(SetBarOwnerForm)
if set_owner_form.validate():
bar_id = current_bar.id
bar = Bar.query.filter_by(id=bar_id).one_or_none()
if bar is None:
flash("Invalid bar_id: {}".format(bar_id), 'danger')
return None
# assign owner
user = user_datastore.find_user(email=set_owner_form.owner.data)
owner = user_datastore.find_role('owner')
old_owner = bar.owner
if user and user != bar.owner:
user_datastore.add_role_to_user(user, owner)
bar.owner = user
send_mail("[Mix-Mind] Bar Ownership Granted", user.email, 'simple',
heading="{}, you now own {}".format(user.get_name(), bar.name),
message="You have been assigned as the owner of {}</p><p>The bar can now be managed from the site. Switch to your bar, and then navigate to the management settings.".format(bar.name))
flash("{} is now the proud owner of {}".format(user.get_name(), bar.cname))
elif set_owner_form.owner.data == '' and bar.owner:
# remove the owner from this bar
flash("{} is no longer the owner of {}".format(bar.owner.get_name(), bar.cname))
bar.owner = None
# remove "owner" role if user does not own any more bars
if not old_owner.owns:
user_datastore.remove_role_from_user(old_owner, owner)
if old_owner:
send_mail("[Mix-Mind] Bar Ownership Revoked", old_owner.email, 'simple',
heading="{}, you no longer own {}".format(old_owner.get_name(), bar.name),
message="You have been unassigned as the owner of {}.".format(bar.name))
user_datastore.commit()
else:
flash("Error in form validation", 'warning')
return redirect(url_for('admin_dashboard'))
@app.route("/admin/dashboard", methods=['GET', 'POST'])
@login_required
@roles_required('admin')
def admin_dashboard():
new_bar_form = get_form(CreateBarForm)
set_owner_form = get_form(SetBarOwnerForm)
if request.method == 'POST':
if 'create_bar' in request.form:
if new_bar_form.validate():
if Bar.query.filter_by(cname=new_bar_form.cname.data).one_or_none():
flash("Bar name already in use", 'warning')
return None
bar_args = {'cname': new_bar_form.cname.data}
if new_bar_form.name.data == "":
bar_args['name'] = bar_args['cname']
else:
bar_args['name'] = new_bar_form.name.data
if new_bar_form.tagline.data:
bar_args['tagline'] = new_bar_form.tagline.data
new_bar = Bar(**bar_args)
db.session.add(new_bar)
db.session.commit()
flash("Created a new bar", 'success')
else:
flash("Error in form validation", 'warning')
if 'set-default-bar' in request.form:
bar_id = request.form.get('bar_id', None, int)
to_activate_bar = Bar.query.filter_by(id=bar_id).one_or_none()
if to_activate_bar.is_default:
flash("Bar ID: {} is already the default".format(bar_id), 'warning')
return redirect(request.url)
if not to_activate_bar:
flash("Error: Bar ID: {} is invalid".format(bar_id), 'danger')
return redirect(request.url)
bars = Bar.query.all()
for bar in bars:
bar.is_default = (bar.id == bar_id)
db.session.commit()
flash("Bar ID: {} is now the default".format(bar_id), 'success')
return redirect(request.url)
set_owner_form.owner.data = '' if not current_bar.owner else current_bar.owner.email
bars = Bar.query.all()
users = User.query.all()
orders = Order.query.all()
#bar_table = bars_as_table(bars)
user_table = users_as_table(users)
order_table = orders_as_table(orders)
return render_template('dashboard.html', new_bar_form=new_bar_form,
set_owner_form=set_owner_form, users=users, orders=orders,
bars=bars, user_table=user_table, order_table=order_table)
@app.route("/admin/menu_generator", methods=['GET', 'POST'])
@login_required
@roles_required('admin')
def menu_generator():
return render_template('result.html', heading="Still under construction...")
form = get_form(DrinksForm)
log.debug("Form errors: {}".format(form.errors))
recipes = []
excluded = None
stats = None
if request.method == 'POST':
if form.validate():
log.info(request)
recipes, excluded, stats = recipes_from_options(form, to_html=True)
flash("Settings applied. Showing {} available recipes".format(len(recipes)))
else:
flash("Error in form validation", 'danger')
return render_template('menu_generator.html', form=form, recipes=recipes, excluded=excluded, stats=stats)
@app.route("/admin/menu_generator/download/", methods=['POST'])
@login_required
@roles_required('admin')
def menu_download():
form = get_form(DrinksForm)
raise NotImplementedError
if form.validate():
log.info(request)
recipes, _, _ = recipes_from_options(form)
display_options = bundle_options(DisplayOptions, form)
form.pdf_filename.data = 'menus/{}'.format(filename_from_options(bundle_options(PdfOptions, form), display_options))
pdf_options = bundle_options(PdfOptions, form)
pdf_file = '{}.pdf'.format(pdf_options.pdf_filename)
generate_recipes_pdf(recipes, pdf_options, display_options, mms.barstock.df)
return send_file(os.path.abspath(pdf_file), 'application/pdf', as_attachment=True, attachment_filename=pdf_file.lstrip('menus/'))
else:
flash("Error in form validation", 'danger')
return render_template('application_main.html', form=form, recipes=[], excluded=None)
@app.route("/admin/recipes", methods=['GET','POST'])
@login_required
@roles_required('admin')
def recipe_library():
return render_template('result.html', heading="Still under construction...")
select_form = get_form(RecipeListSelector)
log.debug("Form errors: {}".format(select_form.errors))
add_form = get_form(RecipeForm)
log.debug("Form errors: {}".format(add_form.errors))
if request.method == 'POST':
log.info(request)
if 'recipe-list-select' in request.form:
recipes = select_form.recipes.data
mms.regenerate_recipes(current_bar)
flash("Now using recipes from {}".format(recipes))
return render_template('recipes.html', select_form=select_form, add_form=add_form)
################################################################################
# API routes
################################################################################
# All of these routes are designed to be used against ajax calls
# Each route will return a json object with the following parameters:
# status: "success" - successful go ahead and use the data
# "error" - something went wrong
# message: "..." - error message
# data: {...} - the expected data returned to caller
def api_error(message, **kwargs):
    """Build the standard JSON error envelope (status/message plus extras)."""
    return jsonify(status="error", message=message, **kwargs)
def api_success(data, message="", **kwargs):
    """Build the standard JSON success envelope, carrying the payload in 'data'."""
    return jsonify(status="success", message=message, data=data, **kwargs)
@app.route("/api/ingredients", methods=['GET'])
@login_required
@roles_accepted('admin', 'owner')
@check_ownership
def api_ingredients():
ingredients = Ingredient.query.filter_by(bar_id=current_bar.id).order_by(Ingredient.Category, Ingredient.Type).all()
ingredients = [i.as_dict() for i in ingredients]
return api_success(ingredients)
@app.route("/api/ingredient", methods=['POST', 'GET', 'PUT', 'DELETE'])
@login_required
@roles_accepted('admin', 'owner')
@check_ownership
def api_ingredient():
    """CRUD endpoint for individual ingredients
    Identifying parameters:
    :param string iid: iid of the changed row's ingredient
    Create params:
    :param string Category: Category identifier
    :param string Kind: kind for ingredient
    :param string Type: type for ingredient
    :param float ABV: ABV value
    :param float Size: Size
    :param string Unit: one of util.VALID_UNITS
    :param float Price: price of the ingredient
    Read:
    Update:
    :param string field: the value being modified
    :param string value: the new value (type coerced from field)
    Delete:
    """
    # check parameters
    iid = request.form.get('iid')
    if iid is None and request.method in ['PUT', 'DELETE']:
        # BUGFIX: was "{}".fromat(...), which raised AttributeError instead
        # of returning the intended error response
        return api_error("iid is a required parameter for {}".format(request.method))
    ingredient = Ingredient.query_by_iid(iid)
    if not ingredient and not request.method == 'POST':
        return api_error("Ingredient not found")
    # BUGFIX: on POST the ingredient may legitimately be None; guard before
    # dereferencing bar_id to avoid an AttributeError
    if ingredient and ingredient.bar_id != current_bar.id:
        return api_error("Request iid {} includes wrong bar_id".format(iid))
    # create
    if request.method == 'POST':
        if ingredient:
            return api_error("Ingredient '{}' already exists, try editing it instead".format(ingredient))
        return api_error("Not implemented")
    # read
    elif request.method == 'GET':
        # BUGFIX: was messaage=..., a typo'd kwarg that bypassed the message field
        return api_success(ingredient.as_dict(), message="Ingredient: {}".format(ingredient))
    # update
    elif request.method == 'PUT':
        field = request.form.get('field')
        if not field:
            return api_error("'field' is a required parameter")
        elif field not in "Category,Type,Kind,In_Stock,ABV,Size_mL,Size_oz,Price_Paid".split(','):
            return api_error("'{}' is not allowed to be edited via the API".format(field))
        value = request.form.get('value')
        if not value:
            return api_error("'value' is a required parameter")
        # TODO value constraints
        try:
            # the toggle switches return 'on'/'off'
            # but that is their current state, so toggle value here
            if field == 'In_Stock':
                value = {'on': False, 'off': True}[value]
            else:
                # coerce to the type of the current value for that field
                value = type(ingredient[field])(value)
        except AttributeError:
            return api_error("Invalid field '{}' for an Ingredient".format(field))
        except ValueError as e:
            return api_error(str(e))
        # special handling
        if field == 'Size_oz':
            # convert to mL because that's how everything works
            ingredient['Size_mL'] = convert_units(value, 'oz', 'mL')
        else:
            ingredient[field] = value
        if field in ['Size_mL', 'Size_oz', 'Price_Paid', 'Type']:
            _update_computed_fields(ingredient)
        try:
            db.session.commit()
        except Exception as e:
            return api_error("{}: {}".format(e.__class__.__name__, e))
        data = ingredient.as_dict()
        mms.regenerate_recipes(current_bar, ingredient=ingredient.type_)
        return api_success(data, message='Successfully updated "{}" for "{}"'.format(field, ingredient.iid()))
    # delete
    elif request.method == 'DELETE':
        db.session.delete(ingredient)
        db.session.commit()
        mms.regenerate_recipes(current_bar, ingredient=ingredient.type_)
        return api_success({'iid': ingredient.iid()}, message='Successfully deleted "{}"'.format(ingredient.iid()))
    return api_error("Unknown method")
@app.route("/api/ingredients/download", methods=['GET'])
@login_required
@roles_accepted('admin', 'owner')
@check_ownership
def api_ingredients_download():
    """Send the current bar's ingredient list as a CSV attachment."""
    rows = (Ingredient.query.filter_by(bar_id=current_bar.id)
            .order_by(Ingredient.Category, Ingredient.Type).all())
    lines = [Ingredient.csv_heading()] + [row.as_csv() for row in rows]
    download_name = "{}_ingredients_{}.csv".format(
        current_bar.cname.replace(' ', '_'), pendulum.now().int_timestamp)
    tmp_filename = get_tmp_file()
    with open(tmp_filename, 'w') as fp:
        fp.writelines(lines)
    return send_file(tmp_filename, 'text/csv', as_attachment=True, attachment_filename=download_name)
@app.route("/api/user_current_bar", methods=['POST', 'GET', 'PUT', 'DELETE'])
@login_required
def api_user_current_bar():
    """Request endpoint to change a user's current bar
    :param int user_id: ID of the user to modify
    :param int bar_id: ID of the bar to set as user's current view default
        If 0, will use the configured default bar
    :param string next: Should be the URL of the current page so the user
        can be redirected to that page
    """
    # BUGFIX: request.args.get() returns None when the parameter is missing,
    # and int(None) raises TypeError (not ValueError), which previously
    # escaped as a 500 — catch both.
    try:
        user_id = int(request.args.get('user_id'))
    except (TypeError, ValueError):
        flash("Invalid user_id parameter", 'danger')
        return render_template('result.html', heading="User profile unavailable")
    try:
        bar_id = int(request.args.get('bar_id'))
    except (TypeError, ValueError):
        flash("Invalid bar_id parameter", 'danger')
        return render_template('result.html', heading="Bar unavailable")
    next_url = request.args.get('next', url_for('browse'))
    user = user_datastore.find_user(id=user_id)
    if not user:
        flash("Invalid user {}".format(user_id), 'danger')
        return render_template('result.html', heading="Invalid user")
    # only admins may change another user's default bar
    if user != current_user and not current_user.has_role('admin'):
        flash("Cannot change default bar for another user {}".format(user_id), 'danger')
        return render_template('result.html', heading="Invalid default bar request")
    # NOTE(review): the docstring says bar_id=0 falls back to the configured
    # default bar, but no such handling is visible here — confirm intent.
    bar = Bar.query.filter_by(id=bar_id).one_or_none()
    if not bar:
        flash("Invalid bar id: {}".format(bar_id))
        return render_template('result.html', heading="Invalid bar")
    if not bar.is_public and (user.id != bar.owner_id) and not user.has_role('admin'):
        flash("Bar {} is not publicly available, you need to be the owner or admin".format(bar_id), 'danger')
        return render_template('result.html', heading="Invalid bar")
    user.current_bar_id = bar.id
    user_datastore.commit()
    return redirect(next_url)
@app.route("/dev/debug", methods=['GET'])
@login_required
@roles_required('admin')
def dev_debug_console():
    """Drop into an interactive ipdb session on the server (DEBUG config only)."""
    if app.config.get('DEBUG', False):
        # Blocks the request thread until the debugger session is closed.
        import ipdb; ipdb.set_trace();
        return render_template('result.html', heading="Finished debug session...")
    else:
        return render_template('result.html', heading="Debug unavailable")
@app.route("/dev/debug/error_handler", methods=['GET'])
@login_required
@roles_required('admin')
def dev_debug_error_handler_page():
    """Render the 500 handler page directly, for testing in dev mode.

    Tracebacks are surfaced in dev mode, so this is where to test the
    500 handler page.
    """
    if not app.config.get('DEBUG', False):
        return render_template('result.html', heading="Debug unavailable")
    return render_template('error.html')
################################################################################
# Helper routes
################################################################################
@app.route('/api/json/<recipe_name>')
def recipe_json(recipe_name):
    """Return the named base recipe as JSON, or a plain not-found message."""
    decoded = urllib.parse.unquote_plus(recipe_name)
    if decoded in mms.base_recipes:
        return jsonify(mms.base_recipes[decoded])
    return "{} not found".format(decoded)
@app.errorhandler(500)
def handle_internal_server_error(e):
    """Render the error page for unhandled server errors.

    Returns the error template together with an explicit 500 status code;
    without it Flask sends the error page with a misleading 200 OK (the
    status had been commented out here).
    """
    flash(e, 'danger')
    return render_template('error.html'), 500
@app.route("/api/test")
def api_test():
    """Trivial ajax smoke test: adds the 'a' and 'b' query parameters."""
    first = request.args.get('a', 0, type=int)
    second = request.args.get('b', 0, type=int)
    return jsonify(result=first + second)
|
{
"content_hash": "455606482a0f782f5df33cd445c9845b",
"timestamp": "",
"source": "github",
"line_count": 887,
"max_line_length": 203,
"avg_line_length": 42.95377677564825,
"alnum_prop": 0.6020734908136482,
"repo_name": "twschum/mix-mind",
"id": "b15f2924c16661ae9a203380f80b729d1d51b69a",
"size": "38100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mixmind/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5158"
},
{
"name": "HTML",
"bytes": "51762"
},
{
"name": "JavaScript",
"bytes": "24582"
},
{
"name": "Python",
"bytes": "151829"
},
{
"name": "Shell",
"bytes": "259"
}
],
"symlink_target": ""
}
|
import os
from girder.models.setting import Setting
class ImageSpaceSetting(Setting):
    """Girder Setting subclass whose values can be overridden by environment
    variables of the same name."""

    # Settings that must resolve to a value; get() raises when they don't.
    requiredSettings = ('IMAGE_SPACE_SOLR',
                        'IMAGE_SPACE_PREFIX',
                        'IMAGE_SPACE_SOLR_PREFIX')

    def validateImageSpaceSolr(self, doc):
        # normalize away trailing slashes so URLs can be joined safely
        return doc.rstrip('/')

    def validateImageSpaceSolrPrefix(self, doc):
        return doc.rstrip('/')

    def validateImageSpacePrefix(self, doc):
        return doc.rstrip('/')

    def get(self, key):
        """Resolve key: environment wins, then the stored setting.

        Raises when a required setting is missing; returns False for
        missing optional settings.
        """
        stored = super(ImageSpaceSetting, self).get(key)
        if os.environ.get(key, '') != '':
            return os.environ.get(key)
        if stored is not None:
            return stored
        if key in self.requiredSettings:
            raise Exception('ImageSpace will not function without the %s setting.' % key)
        return False
|
{
"content_hash": "b427b2b1a435174f3cad7a8e0186a135",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 89,
"avg_line_length": 29.3,
"alnum_prop": 0.6063708759954494,
"repo_name": "smadha/image_space",
"id": "1b9d60dc48024d0f4fc19a7bbdca0b464a244b38",
"size": "879",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "imagespace/server/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "555"
},
{
"name": "HTML",
"bytes": "4080"
},
{
"name": "JavaScript",
"bytes": "38903"
},
{
"name": "Python",
"bytes": "101424"
},
{
"name": "Shell",
"bytes": "5845"
}
],
"symlink_target": ""
}
|
"""Probabilistic neural layers.
See ${python/contrib.bayesflow.layers}.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.bayesflow.python.ops.layers_dense_variational_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Public API surface of this module; every other name imported by the
# wildcard import above is stripped by remove_undocumented() below.
_allowed_symbols = [
    'DenseVariational',
    'dense_variational',
    'default_loc_scale_fn',
    'default_mean_field_normal_fn',
]
remove_undocumented(__name__, _allowed_symbols)
|
{
"content_hash": "19b29384360c7fcb6d1f1eaf6bee43f3",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 83,
"avg_line_length": 26.652173913043477,
"alnum_prop": 0.7487765089722676,
"repo_name": "laszlocsomor/tensorflow",
"id": "dcead38af826a12e776160bdb251ba021e6b953c",
"size": "1302",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/bayesflow/python/ops/layers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8535"
},
{
"name": "C",
"bytes": "314362"
},
{
"name": "C++",
"bytes": "34295651"
},
{
"name": "CMake",
"bytes": "211937"
},
{
"name": "Go",
"bytes": "1012495"
},
{
"name": "Java",
"bytes": "533607"
},
{
"name": "Jupyter Notebook",
"bytes": "1940884"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "44807"
},
{
"name": "Objective-C",
"bytes": "12460"
},
{
"name": "Objective-C++",
"bytes": "94483"
},
{
"name": "PHP",
"bytes": "1429"
},
{
"name": "Perl",
"bytes": "6186"
},
{
"name": "Perl 6",
"bytes": "1360"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "30060071"
},
{
"name": "Ruby",
"bytes": "547"
},
{
"name": "Shell",
"bytes": "402121"
}
],
"symlink_target": ""
}
|
import olympia.core.logger
from olympia.amo.celery import task
from olympia.tags.models import Tag
task_log = olympia.core.logger.getLogger('z.task')
@task(rate_limit='10/m')
def update_all_tag_stats(pks, **kw):
    """Recalculate stats for every Tag whose pk is in pks.

    Guards against an empty pk list, which previously raised IndexError
    on pks[0] before any work was done. Logging args are passed lazily.
    """
    if not pks:
        task_log.info("update_all_tag_stats called with no pks; nothing to do")
        return
    task_log.info("[%s@%s] Calculating stats for tags starting with %s",
                  len(pks), update_all_tag_stats.rate_limit, pks[0])
    for tag in Tag.objects.filter(pk__in=pks):
        tag.update_stat()
@task(rate_limit='1000/m')
def update_tag_stat(tag, **kw):
    """Recalculate stats for a single Tag."""
    task_log.info("[1@%s] Calculating stats for tag %s" % (
        update_tag_stat.rate_limit, tag.pk))
    tag.update_stat()
|
{
"content_hash": "47f3e14a22a85e212dfda30c967db657",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 30.047619047619047,
"alnum_prop": 0.6434231378763867,
"repo_name": "harikishen/addons-server",
"id": "9b1c789188f37d98d956725b7a0613a4ece30384",
"size": "631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/tags/tasks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "822508"
},
{
"name": "HTML",
"bytes": "698554"
},
{
"name": "JavaScript",
"bytes": "1087360"
},
{
"name": "Makefile",
"bytes": "811"
},
{
"name": "PLSQL",
"bytes": "990"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "4560536"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "7564"
},
{
"name": "Smarty",
"bytes": "1859"
}
],
"symlink_target": ""
}
|
import os
import sys
import shutil
files = []
for dirpath, dirnames, filenames in os.walk(".", True):
if dirpath.find(".svn")>=0: continue
for elem in dirnames:
if elem.find(".svn") >= 0:
dirnames.pop(dirnames.index(elem))
for filename in filenames:
if filename.endswith(".pyc") or filename.endswith("~"):
files.append(os.path.join(dirpath, filename))
for file in files:
if '--really' in sys.argv:
print "Deleting", file
os.unlink(file)
else:
print file
if '--really' in sys.argv:
try:
shutil.rmtree('build')
except:
pass
try:
shutil.rmtree('dist')
except:
pass
try:
shutil.rmtree('django_platnosci.egg-info')
except:
pass
|
{
"content_hash": "7bcfa9a177455d53fae4c7f8658a4135",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 63,
"avg_line_length": 23.37142857142857,
"alnum_prop": 0.5501222493887531,
"repo_name": "mpasternak/django-platnosci",
"id": "c9f73788e8f39e7887f3eb7d80cee6c54766edef",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cleanup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl6",
"bytes": "902"
},
{
"name": "Python",
"bytes": "40693"
}
],
"symlink_target": ""
}
|
import logging
import numpy as np
import copy
logger = logging.getLogger("vaex.ui.undo")
class UndoManager(object):
    """Keeps undo/redo stacks of Action objects.

    Actions register themselves via add_action(); undo() moves the most
    recent action onto the redo list and executes its undo(); redo()
    does the reverse.
    """

    def __init__(self, max_bytes=1024**3):
        # NOTE(review): max_bytes is accepted but not enforced anywhere here.
        self.actions_undo = []
        self.actions_redo = []
        self.undo_count = 0  # number of times undo is pressed

    def undo(self):
        """Undo the most recent action, moving it to the redo stack."""
        logger.debug("history was %r-%r" % (self.actions_undo, self.actions_redo))
        action = self.actions_undo.pop()
        logger.debug("undoing: %r" % action)
        self.actions_redo.insert(0, action)
        try:
            action.undo()
        except Exception:
            # was a bare except:, which also swallowed KeyboardInterrupt
            # and SystemExit
            logger.exception("error executing action")
        logger.debug("history is %r-%r" % (self.actions_undo, self.actions_redo))

    def add_action(self, action):
        """Record a freshly executed action; clears any pending redos."""
        # cut off any remaining 'redo' action, and add action to the list
        logger.debug("history was %r-%r" % (self.actions_undo, self.actions_redo))
        logger.debug("adding action: %r" % action)
        self.actions_redo = []
        self.actions_undo.append(action)
        logger.debug("history is %r-%r" % (self.actions_undo, self.actions_redo))

    def redo(self):
        """Re-execute the oldest undone action and push it back on the undo stack."""
        logger.debug("history was %r-%r" % (self.actions_undo, self.actions_redo))
        logger.debug("redoing")
        action = self.actions_redo.pop(0)
        try:
            action.do()
        except Exception:
            logger.exception("error executing action")
        self.actions_undo.append(action)
        logger.debug("history is %r-%r" % (self.actions_undo, self.actions_redo))

    def can_undo(self):
        return len(self.actions_undo) > 0

    def can_redo(self):
        return len(self.actions_redo) > 0
class Action(object):
    """Base type for undoable operations.

    Concrete actions are expected to provide:

    - byteSize(): number of bytes the action occupies
    - do(): perform the operation (used at the moment of the actual action,
      and during redo)
    - redo(): redo the operation
    - description(): GUI-friendly description

    Each action should register itself with its UndoManager.
    """
    pass
class ActionMask(Action):
    """Undoable selection-mask change, stored bit-packed to save memory."""

    def __init__(self, undo_manager, description, mask, apply_mask):
        """Assuming mask is a bool array"""
        # store the 1 byte mask as a 1 bit mask to save memory
        self.undo_manager = undo_manager
        self.data = None if mask is None else np.packbits(mask.astype(np.uint8))
        self.length = 0 if mask is None else len(mask)
        self._description = description
        self.mask = mask
        self.apply_mask = apply_mask
        self.undo_manager.add_action(self)

    def description(self):
        return self._description

    def do(self):
        # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the equivalent dtype for the boolean mask.
        mask = None if self.data is None else np.unpackbits(self.data).astype(bool)[:self.length]
        self.apply_mask(mask)

    def undo(self):
        # find a previous ActionMask, and execute it, but just keep it in the history
        for action in self.undo_manager.actions_undo[::-1]:  # traverse from most recent to last
            if isinstance(action, ActionMask):
                action.do()
                break
        else:  # if nothing was selected before, select None
            self.apply_mask(None)
class ActionZoom(Action):
    """Undoable viewport/range change for a plot.

    Deep-copies both the previous and the new ranges so that do()/undo()
    can re-apply them later via the apply_ranges callback.
    """

    def __init__(self, undo_manager, description, apply_ranges, all_axis_indices,
                 previous_ranges_viewport, previous_range_level_show, axis_indices, ranges_viewport=None, range_level_show=None):
        self.undo_manager = undo_manager
        self.apply_ranges = apply_ranges
        self.all_axis_indices = all_axis_indices
        self.previous_ranges_viewport = copy.deepcopy(previous_ranges_viewport)
        if previous_range_level_show is None:
            self.previous_range_level_show = None
        else:
            self.previous_range_level_show = copy.deepcopy(previous_range_level_show)
        self.axis_indices = axis_indices
        self.ranges_viewport = copy.deepcopy(ranges_viewport)
        self.range_level_show = copy.deepcopy(range_level_show)
        self._description = description
        self.undo_manager.add_action(self)

    def description(self):
        return self._description

    def do(self):
        # apply only the axes that actually changed
        self.apply_ranges(self.axis_indices, self.ranges_viewport, self.range_level_show)

    def undo(self):
        # restore every axis to its remembered previous state
        self.apply_ranges(self.all_axis_indices, self.previous_ranges_viewport, self.previous_range_level_show)
|
{
"content_hash": "ae294e021ce974354843ff6b5a93c146",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 129,
"avg_line_length": 36.97457627118644,
"alnum_prop": 0.6348842539537016,
"repo_name": "maartenbreddels/vaex",
"id": "92d326dab70f505220eefdc935db413f8fd20ee9",
"size": "4363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/vaex-ui/vaex/ui/undo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1888"
},
{
"name": "C++",
"bytes": "81166"
},
{
"name": "CSS",
"bytes": "6604"
},
{
"name": "GLSL",
"bytes": "6204"
},
{
"name": "HTML",
"bytes": "177613"
},
{
"name": "JavaScript",
"bytes": "1489136"
},
{
"name": "Makefile",
"bytes": "432"
},
{
"name": "PHP",
"bytes": "33807"
},
{
"name": "Python",
"bytes": "1893232"
},
{
"name": "Shell",
"bytes": "4639"
}
],
"symlink_target": ""
}
|
from imgurpython import ImgurClient
import click
import os
@click.command()
@click.argument('gif', type=click.Path(exists=True))
def upload_gif(gif):
    """Uploads an image file to Imgur"""
    api_id = os.environ.get('IMGUR_API_ID')
    api_secret = os.environ.get('IMGUR_API_SECRET')
    if api_id is None or api_secret is None:
        click.echo('Cannot upload - could not find IMGUR_API_ID or IMGUR_API_SECRET environment variables')
        return
    client = ImgurClient(api_id, api_secret)
    click.echo('Uploading file {}'.format(click.format_filename(gif)))
    response = client.upload_from_path(gif)
    click.echo('File uploaded - see your gif at {}'.format(response['link']))


if __name__ == '__main__':
    upload_gif()
|
{
"content_hash": "31249202450baf323ff2d9923db92b17",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 107,
"avg_line_length": 27.321428571428573,
"alnum_prop": 0.6784313725490196,
"repo_name": "atbaker/imgur-uploader",
"id": "ec0b1e80ff6f58279511012919feb15992e116ec",
"size": "765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imgur_uploader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2091"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mezzanine.core.fields
class Migration(migrations.Migration):
    """Initial migration: creates CustomPage extending mezzanine's Page."""

    initial = True

    dependencies = [
        ('pages', '0003_auto_20150527_1555'),
    ]

    operations = [
        migrations.CreateModel(
            name='CustomPage',
            fields=[
                # multi-table inheritance link back to pages.Page
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='pages.Page')),
                ('content', mezzanine.core.fields.RichTextField(verbose_name='Content')),
                # NOTE(review): upload_to=b'banners' is a bytes literal
                # (Python 2 artifact) — presumably meant as 'banners'; left
                # unchanged because migration files must not be edited.
                ('banner', models.ImageField(upload_to=b'banners')),
            ],
            options={
                'ordering': ('_order',),
            },
            bases=('pages.page', models.Model),
        ),
    ]
|
{
"content_hash": "9be53e056d8b543ab2e9cbac19d8c796",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 185,
"avg_line_length": 30.551724137931036,
"alnum_prop": 0.5880361173814899,
"repo_name": "serialworm/jessiebeemine",
"id": "5f21af8aaefe57cbee12a414e44815d53f150da7",
"size": "958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/mysite/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8147"
},
{
"name": "HTML",
"bytes": "14538"
},
{
"name": "Python",
"bytes": "40712"
}
],
"symlink_target": ""
}
|
for f in AllFonts():
hasEdits = False
for name, members in f.groups.items():
groupHasEdits = False
new = []
for m in members:
if m[-1] == "'":
groupHasEdits = True
hasEdits = True
new.append(m[:-1])
else:
new.append(m)
f.groups[name]=new
if hasEdits:
print "edits made in ", f.info.fullName
f.save()
else:
print "no edits made", f.info.fullName
|
{
"content_hash": "06abd365b2b0c5f1b1b9774ef60ef67a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 47,
"avg_line_length": 27,
"alnum_prop": 0.46393762183235865,
"repo_name": "typemytype/RoboFontExamples",
"id": "5b331c2066db077ace61434be519cb6efa9f9196",
"size": "514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fontGroups/changeGroupSingleQuote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54401"
}
],
"symlink_target": ""
}
|
"""Tests for prefetching_ops_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distribute.python import prefetching_ops_v2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class PrefetchingOpsV2Test(test.TestCase):
  """Tests for prefetching_ops_v2.prefetch_to_devices."""

  def testPrefetchToOneDevice(self):
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops_v2.prefetch_to_devices("/gpu:0"))
    iterator = device_dataset.make_one_shot_iterator()
    next_element = iterator.get_next()
    with self.cached_session() as sess:
      for i in range(10):
        self.assertEqual(i, sess.run(next_element))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)

  def testPrefetchToTwoDevicesInAList(self):
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops_v2.prefetch_to_devices(["/cpu:0", "/gpu:0"]))
    iterator = device_dataset.make_one_shot_iterator()
    next_element = iterator.get_next()
    output = []
    # TODO(rohanj): Modify test to go till the end of the dataset when we
    # switch to MultiDeviceIterator.
    with self.cached_session() as sess:
      for _ in range(4):
        result = sess.run(next_element)
        self.assertEqual(2, len(result))
        output.extend(result)
      # FIX: assertEquals is a deprecated alias of assertEqual.
      self.assertEqual(set(range(8)), set(output))

  def testPrefetchToTwoDevicesWithReinit(self):
    if not test_util.is_gpu_available():
      self.skipTest("No GPU available")
    host_dataset = dataset_ops.Dataset.range(10)
    device_dataset = host_dataset.apply(
        prefetching_ops_v2.prefetch_to_devices(["/cpu:0", "/gpu:0"]))
    iterator = device_dataset.make_initializable_iterator()
    next_element = iterator.get_next()
    # TODO(rohanj): Modify test to go till the end of the dataset when we
    # switch to MultiDeviceIterator.
    with self.cached_session() as sess:
      sess.run(iterator.initializer)
      for _ in range(4):
        sess.run(next_element)
      sess.run(iterator.initializer)
      for _ in range(4):
        sess.run(next_element)
# Run under the TensorFlow test runner when executed directly.
if __name__ == "__main__":
  test.main()
|
{
"content_hash": "cef3e7bd45df110f8db212c18f70b03d",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 73,
"avg_line_length": 33.276315789473685,
"alnum_prop": 0.6888098062475286,
"repo_name": "kobejean/tensorflow",
"id": "16799104e8112f4391152c0cf2a15af81f8c2c9d",
"size": "3218",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distribute/python/prefetching_ops_v2_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "341894"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49273038"
},
{
"name": "CMake",
"bytes": "195712"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1253646"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "836009"
},
{
"name": "Jupyter Notebook",
"bytes": "2604741"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52734"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41122917"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "466896"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import json
import re
from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import transaction
from django.utils.html import format_html
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy, gettext_noop
from couchdbkit import ResourceNotFound
from crispy_forms.bootstrap import FieldWithButtons, InlineField, StrictButton
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, Div, Field, Fieldset, Layout
from memoized import memoized
from dimagi.utils.django.fields import TrimmedCharField
from corehq.apps.casegroups.models import CommCareCaseGroup
from corehq.apps.data_interfaces.models import (
AutomaticUpdateRule,
CaseRuleAction,
CaseRuleCriteria,
ClosedParentDefinition,
CustomActionDefinition,
CustomMatchDefinition,
LocationFilterDefinition,
MatchPropertyDefinition,
UCRFilterDefinition,
UpdateCaseDefinition,
)
from corehq.apps.hqwebapp.crispy import HQFormHelper
from corehq.apps.hqwebapp.widgets import SelectToggle
from corehq.apps.locations.models import SQLLocation
from corehq.apps.reports.analytics.esaccessors import get_case_types_for_domain
from corehq.apps.userreports.exceptions import BadSpecError
from corehq.apps.userreports.filters.factory import FilterFactory
from corehq.apps.userreports.specs import FactoryContext
from corehq.toggles import CASE_UPDATES_UCR_FILTERS
def true_or_false(value):
    """Parse the literal strings 'true'/'false' into booleans; raise otherwise."""
    if value == 'true':
        return True
    if value == 'false':
        return False
    raise ValueError("Expected 'true' or 'false'")
def remove_quotes(value):
    """Strip one matching pair of surrounding single or double quotes.

    Non-strings and strings without a matching quote pair pass through
    unchanged.
    """
    if isinstance(value, str) and len(value) >= 2:
        first, last = value[0], value[-1]
        if first == last and first in ("'", '"'):
            return value[1:-1]
    return value
def is_valid_case_property_name(value):
    """Return True iff value is a string made only of allowed characters."""
    if not isinstance(value, str):
        return False
    try:
        validate_case_property_characters(value)
    except ValidationError:
        return False
    return True
def validate_case_property_characters(value):
    """Raise ValidationError unless value is alphanumeric/underscore/hyphen."""
    if re.match('^[a-zA-Z0-9_-]+$', value) is None:
        raise ValidationError(
            _("Property names should only contain alphanumeric characters, underscore, or hyphen.")
        )
def validate_case_property_name(value, allow_parent_case_references=True):
    """Validate a case property reference and return the stripped value.

    When allow_parent_case_references is True, leading 'parent/' or 'host/'
    prefixes are permitted (and only validated past them); otherwise any '/'
    is rejected.
    """
    if not isinstance(value, str):
        raise ValidationError(_("Please specify a case property name."))
    value = value.strip()
    if not value:
        raise ValidationError(_("Please specify a case property name."))
    if allow_parent_case_references:
        # drop any leading parent/ or host/ hops before validating the rest
        property_name = re.sub('^(parent/|host/)+', '', value)
        if not property_name:
            raise ValidationError(_("Please specify a case property name."))
        if '/' in property_name:
            raise ValidationError(
                _("Case property reference cannot contain '/' unless referencing the parent "
                  "or host case with 'parent/' or 'host/'")
            )
        validate_case_property_characters(property_name)
    else:
        if '/' in value:
            raise ValidationError(
                _("Invalid character '/' in case property name: '{}'. "
                  "Parent or host case references are not allowed.").format(value)
            )
        validate_case_property_characters(value)
    return value
def hidden_bound_field(field_name, data_value):
    """Crispy hidden input whose value is knockout-bound to data_value."""
    return Field(field_name, type='hidden', data_bind='value: %s' % data_value)
def validate_case_property_value(value):
    """Strip whitespace and surrounding quotes; require a non-empty string."""
    if not isinstance(value, str):
        raise ValidationError(_("Please specify a case property value."))
    cleaned = remove_quotes(value.strip()).strip()
    if not cleaned:
        raise ValidationError(_("Please specify a case property value."))
    return cleaned
def validate_non_negative_days(value):
    """Coerce value to an int and require it to be >= 0."""
    message = _("Please enter a number of days greater than or equal to zero")
    try:
        days = int(value)
    except (TypeError, ValueError):
        raise ValidationError(message)
    if days < 0:
        raise ValidationError(message)
    return days
class AddCaseGroupForm(forms.Form):
    """Inline form for creating a new CommCareCaseGroup by name."""

    name = forms.CharField(required=True, label=gettext_noop("Group Name"))

    def __init__(self, *args, **kwargs):
        super(AddCaseGroupForm, self).__init__(*args, **kwargs)
        # crispy-forms: render as a compact inline form with an Add button
        self.helper = FormHelper()
        self.helper.form_style = 'inline'
        self.helper.form_show_labels = False
        self.helper.layout = Layout(
            InlineField('name'),
            StrictButton(
                format_html('<i class="fa fa-plus"></i> {}', _("Add Group")),
                css_class='btn-primary',
                type="submit"
            )
        )

    def create_group(self, domain):
        # Persist and return a new case group in the given domain.
        group = CommCareCaseGroup(
            name=self.cleaned_data['name'],
            domain=domain
        )
        group.save()
        return group
class UpdateCaseGroupForm(AddCaseGroupForm):
    """Rename an existing CommCareCaseGroup identified by the hidden item_id."""

    item_id = forms.CharField(widget=forms.HiddenInput())
    action = forms.CharField(widget=forms.HiddenInput(), initial="update_case_group")

    def __init__(self, *args, **kwargs):
        super(UpdateCaseGroupForm, self).__init__(*args, **kwargs)

        self.fields['name'].label = ""

        self.helper.form_style = 'inline'
        self.helper.form_method = 'post'
        self.helper.form_show_labels = True
        self.helper.layout = Layout(
            'item_id',
            'action',
            FieldWithButtons(
                Field('name', placeholder="Group Name"),
                StrictButton(
                    _("Update Group Name"),
                    css_class='btn-primary',
                    type="submit",
                )
            ),
        )

    def clean(self):
        cleaned_data = super(UpdateCaseGroupForm, self).clean()
        try:
            # sets self.current_group for update_group() below
            self.current_group = CommCareCaseGroup.get(self.cleaned_data.get('item_id'))
        except AttributeError:
            # presumably raised when item_id is missing entirely — confirm
            raise forms.ValidationError(_("Please include the case group ID."))
        except ResourceNotFound:
            raise forms.ValidationError(_("A case group was not found with that ID."))
        return cleaned_data

    def update_group(self):
        self.current_group.name = self.cleaned_data['name']
        self.current_group.save()
        return self.current_group
class AddCaseToGroupForm(forms.Form):
    """Inline form for adding a case to a group by one of several identifiers."""

    case_identifier = forms.CharField(label=gettext_noop("Case ID, External ID, or Phone Number"))

    def __init__(self, *args, **kwargs):
        super(AddCaseToGroupForm, self).__init__(*args, **kwargs)
        # crispy-forms: compact inline rendering with an Add Case button
        self.helper = FormHelper()
        self.helper.form_style = 'inline'
        self.helper.form_show_labels = False
        self.helper.layout = Layout(
            InlineField(
                'case_identifier'
            ),
            StrictButton(
                format_html('<i class="fa fa-plus"></i> {}', _("Add Case")),
                css_class='btn-primary',
                type="submit"
            )
        )
class CaseUpdateRuleForm(forms.Form):
    """Form for the basic (name) section of an automatic case update rule."""

    # Prefix to avoid name collisions; this means all input
    # names in the HTML are prefixed with "rule-"
    prefix = "rule"

    name = TrimmedCharField(
        label=gettext_lazy("Name"),
        required=True,
    )

    def compute_initial(self, domain, rule):
        # NOTE(review): domain is unused here; presumably kept for signature
        # parity with related forms' compute_initial methods — confirm.
        return {
            'name': rule.name,
        }

    def __init__(self, domain, *args, **kwargs):
        # Initial data always derives from the rule object, never the caller.
        if 'initial' in kwargs:
            raise ValueError(_("Initial values are set by the form"))

        self.is_system_admin = kwargs.pop('is_system_admin', False)

        rule = kwargs.pop('rule', None)
        if rule:
            kwargs['initial'] = self.compute_initial(domain, rule)

        super(CaseUpdateRuleForm, self).__init__(*args, **kwargs)

        self.domain = domain
        self.helper = HQFormHelper()
        self.helper.form_tag = False
        self.helper.layout = Layout(
            Fieldset(
                _("Basic Information"),
                # NOTE(review): data_bind='name' looks like it may be missing
                # the knockout binding type (cf. 'value: name') — confirm.
                Field('name', data_bind='name'),
            ),
        )
class CaseRuleCriteriaForm(forms.Form):
    """Edits the filter criteria of an automatic case update rule.
    The complex criteria (property matches, custom filters, closed-parent
    filter, location filter, UCR filters) are round-tripped through hidden
    fields whose JSON values are maintained client-side by knockout. The
    clean_*() methods below parse and validate that JSON into plain Python
    structures, which save_criteria() then persists as definition rows.
    """
    # Prefix to avoid name collisions; this means all input
    # names in the HTML are prefixed with "criteria-"
    prefix = "criteria"
    case_type = forms.ChoiceField(
        label=gettext_lazy("Case Type"),
        required=True,
    )
    criteria_operator = forms.ChoiceField(
        label=gettext_lazy("Run when"),
        required=False,
        initial='ALL',
        choices=AutomaticUpdateRule.CriteriaOperator.choices,
        widget=SelectToggle(
            choices=AutomaticUpdateRule.CriteriaOperator.choices,
            attrs={"ko_value": "criteriaOperator"}
        ),
    )
    # Hidden fields whose values are managed by the knockout UI; all but
    # ucr_filter_definitions carry JSON (or 'true'/'false') as plain strings.
    # See hidden_bound_field() usage in __init__.
    filter_on_server_modified = forms.CharField(required=False, initial='false')
    server_modified_boundary = forms.CharField(required=False, initial='')
    custom_match_definitions = forms.CharField(required=False, initial='[]')
    property_match_definitions = forms.CharField(required=False, initial='[]')
    filter_on_closed_parent = forms.CharField(required=False, initial='false')
    location_filter_definition = forms.CharField(required=False, initial='')
    ucr_filter_definitions = forms.JSONField(required=False, initial=list)
    @property
    def current_values(self):
        """Return the current (possibly unvalidated) field values, with the
        JSON-encoded string fields decoded, for the client-side model."""
        return {
            'filter_on_server_modified': self['filter_on_server_modified'].value(),
            'server_modified_boundary': self['server_modified_boundary'].value(),
            'custom_match_definitions': json.loads(self['custom_match_definitions'].value()),
            'property_match_definitions': json.loads(self['property_match_definitions'].value()),
            'filter_on_closed_parent': self['filter_on_closed_parent'].value(),
            'case_type': self['case_type'].value(),
            'location_filter_definition': self['location_filter_definition'].value(),
            'criteria_operator': self['criteria_operator'].value(),
            'ucr_filter_definitions': json.loads(self['ucr_filter_definitions'].value()),
        }
    @property
    def constants(self):
        """Match-type constants needed by the client-side knockout model."""
        return {
            'MATCH_DAYS_BEFORE': MatchPropertyDefinition.MATCH_DAYS_BEFORE,
            'MATCH_DAYS_AFTER': MatchPropertyDefinition.MATCH_DAYS_AFTER,
            'MATCH_EQUAL': MatchPropertyDefinition.MATCH_EQUAL,
            'MATCH_NOT_EQUAL': MatchPropertyDefinition.MATCH_NOT_EQUAL,
            'MATCH_HAS_VALUE': MatchPropertyDefinition.MATCH_HAS_VALUE,
            'MATCH_HAS_NO_VALUE': MatchPropertyDefinition.MATCH_HAS_NO_VALUE,
            'MATCH_REGEX': MatchPropertyDefinition.MATCH_REGEX,
        }
    def compute_initial(self, domain, rule):
        """Build initial form data from an existing rule's criteria."""
        initial = {
            'case_type': rule.case_type,
            'criteria_operator': rule.criteria_operator,
            'filter_on_server_modified': 'true' if rule.filter_on_server_modified else 'false',
            'server_modified_boundary': rule.server_modified_boundary,
        }
        custom_match_definitions = []
        property_match_definitions = []
        ucr_filter_definitions = []
        # Partition the rule's criteria by definition type into the
        # corresponding initial values.
        for criteria in rule.memoized_criteria:
            definition = criteria.definition
            if isinstance(definition, MatchPropertyDefinition):
                property_match_definitions.append({
                    'property_name': definition.property_name,
                    'property_value': definition.property_value,
                    'match_type': definition.match_type,
                })
            elif isinstance(definition, CustomMatchDefinition):
                custom_match_definitions.append({
                    'name': definition.name,
                })
            elif isinstance(definition, ClosedParentDefinition):
                initial['filter_on_closed_parent'] = 'true'
            elif isinstance(definition, LocationFilterDefinition):
                location_id = definition.location_id
                location = SQLLocation.by_location_id(location_id)
                # NOTE(review): by_location_id() presumably returns None for
                # an unknown/deleted location, which would make the
                # location.name access below raise AttributeError -- confirm.
                initial['location_filter_definition'] = {
                    'location_id': location_id,
                    'include_child_locations': definition.include_child_locations,
                    'name': location.name,
                }
            elif isinstance(definition, UCRFilterDefinition):
                ucr_filter_definitions.append({
                    'configured_filter': definition.configured_filter,
                })
        # The first two fields are plain CharFields, so they carry JSON
        # strings; ucr_filter_definitions is a JSONField and takes the list.
        initial['custom_match_definitions'] = json.dumps(custom_match_definitions)
        initial['property_match_definitions'] = json.dumps(property_match_definitions)
        initial['ucr_filter_definitions'] = ucr_filter_definitions
        return initial
    # The properties below form a small template-method surface that
    # subclasses (e.g. DedupeCaseFilterForm) override to customize which
    # filter options the UI offers.
    @property
    def show_fieldset_title(self):
        return True
    @property
    def fieldset_help_text(self):
        return _("The Actions will be performed for all open cases that match all filter criteria below.")
    @property
    def allow_parent_case_references(self):
        return True
    @property
    def allow_case_modified_filter(self):
        return True
    @property
    def allow_case_property_filter(self):
        return True
    @property
    def allow_date_case_property_filter(self):
        return True
    @property
    def allow_ucr_filter(self):
        # UCR filters are gated behind a feature flag per domain.
        return CASE_UPDATES_UCR_FILTERS.enabled(self.domain)
    @property
    def allow_regex_case_property_match(self):
        # The framework allows for this, it's just historically only
        # been an option for messaging conditonal alert rules and not
        # case update rules. So for now the option is just hidden in
        # the case update rule UI.
        return False
    @property
    def allow_locations_filter(self):
        return False
    @property
    def allow_custom_filter(self):
        return True
    def __init__(self, domain, *args, **kwargs):
        """
        :param domain: the domain (project space) the rule belongs to

        Consumes custom kwargs ``is_system_admin``, ``couch_user`` and
        ``rule`` before delegating to forms.Form; ``initial`` may not be
        passed directly because it is always computed from ``rule``.
        """
        if 'initial' in kwargs:
            raise ValueError(_("Initial values are set by the form."))
        self.is_system_admin = kwargs.pop('is_system_admin', False)
        self.couch_user = kwargs.pop('couch_user', None)
        self.domain = domain
        self.initial_rule = kwargs.pop('rule', None)
        if self.initial_rule:
            kwargs['initial'] = self.compute_initial(domain, self.initial_rule)
        super(CaseRuleCriteriaForm, self).__init__(*args, **kwargs)
        self.set_case_type_choices(self.initial.get('case_type'))
        self.fields['criteria_operator'].choices = AutomaticUpdateRule.CriteriaOperator.choices
        # Two crispy helpers: one for the criteria panel and one for the
        # fields rendered at the beginning of the page. form_tag=False on
        # both because they render inside a larger <form>.
        self.helper = HQFormHelper()
        self.helper.form_tag = False
        self.helper.layout = Layout(
            Fieldset(
                _("Case Filters") if self.show_fieldset_title else "",
                HTML(
                    '<p class="help-block alert alert-info"><i class="fa fa-info-circle"></i> %s</p>' % self.fieldset_help_text
                ),
                hidden_bound_field('filter_on_server_modified', 'filterOnServerModified'),
                hidden_bound_field('server_modified_boundary', 'serverModifiedBoundary'),
                hidden_bound_field('custom_match_definitions', 'customMatchDefinitions'),
                hidden_bound_field('property_match_definitions', 'propertyMatchDefinitions'),
                hidden_bound_field('filter_on_closed_parent', 'filterOnClosedParent'),
                hidden_bound_field('location_filter_definition', 'locationFilterDefinition'),
                hidden_bound_field('ucr_filter_definitions', 'ucrFilterDefinitions'),
                Div(data_bind="template: {name: 'case-filters'}"),
                css_id="rule-criteria-panel",
            ),
        )
        self.form_beginning_helper = HQFormHelper()
        self.form_beginning_helper.form_tag = False
        self.form_beginning_helper.layout = Layout(
            Fieldset(
                _("Rule Criteria"),
                Field('case_type', data_bind="value: caseType, staticSelect2: {}"),
                Field('criteria_operator'),
            )
        )
        # Names of the custom (code-defined) criteria available for this
        # deployment, for display in the UI.
        self.custom_filters = settings.AVAILABLE_CUSTOM_RULE_CRITERIA.keys()
    def user_locations(self):
        """Return the locations accessible to the current user as a list of
        {'location_id', 'name'} dicts; empty if no user was supplied."""
        if self.couch_user:
            user_locations = SQLLocation.objects.accessible_to_user(self.domain, self.couch_user)
            return [
                {'location_id': location.location_id, 'name': location.name}
                for location in user_locations
            ]
        return []
    @property
    @memoized
    def requires_system_admin_to_edit(self):
        """True if the rule being edited uses custom (code-defined) match
        criteria, which only system admins may edit."""
        if 'custom_match_definitions' not in self.initial:
            return False
        custom_criteria = json.loads(self.initial['custom_match_definitions'])
        return len(custom_criteria) > 0
    @property
    @memoized
    def requires_system_admin_to_save(self):
        """True if the submitted data uses custom (code-defined) match
        criteria. Assumes full_clean() has already run."""
        return len(self.cleaned_data['custom_match_definitions']) > 0
    def _json_fail_hard(self):
        # Raise ValueError rather than ValidationError: malformed JSON here
        # means the client-side code is broken, not a user input mistake.
        raise ValueError(_("Invalid JSON object given"))
    def set_case_type_choices(self, initial):
        """Populate the case_type choices from the domain's case types,
        always including the initial value even if its type was deleted."""
        case_types = [''] + list(get_case_types_for_domain(self.domain))
        if initial and initial not in case_types:
            # Include the deleted case type in the list of choices so that
            # we always allow proper display and edit of rules
            case_types.append(initial)
        case_types.sort()
        self.fields['case_type'].choices = (
            (case_type, case_type) for case_type in case_types
        )
    def clean_filter_on_server_modified(self):
        # Hidden field posts 'true'/'false' strings; coerce to bool.
        return true_or_false(self.cleaned_data.get('filter_on_server_modified'))
    def clean_server_modified_boundary(self):
        # Be explicit about this check to prevent any accidents in the future
        # (fields are cleaned in declaration order, so
        # filter_on_server_modified is already a bool in cleaned_data here).
        if self.cleaned_data['filter_on_server_modified'] is False:
            return None
        value = self.cleaned_data.get('server_modified_boundary')
        return validate_non_negative_days(value)
    def clean_custom_match_definitions(self):
        """Parse/validate the JSON list of custom (code-defined) criteria.
        Returns a list of {'name': str} dicts."""
        value = self.cleaned_data.get('custom_match_definitions')
        try:
            value = json.loads(value)
        except (TypeError, ValueError):
            self._json_fail_hard()
        if not isinstance(value, list):
            self._json_fail_hard()
        result = []
        for obj in value:
            if not isinstance(obj, dict):
                self._json_fail_hard()
            if 'name' not in obj:
                self._json_fail_hard()
            name = obj['name'].strip()
            result.append({
                'name': name
            })
        return result
    def clean_property_match_definitions(self):
        """Parse/validate the JSON list of case property match criteria.
        Each entry must carry property_name, property_value and match_type;
        property_value is validated per match_type (ignored for has-value
        checks, an integer day count for before/after, a compilable pattern
        for regex). Returns normalized dicts ready for save_criteria()."""
        value = self.cleaned_data.get('property_match_definitions')
        try:
            value = json.loads(value)
        except (TypeError, ValueError):
            self._json_fail_hard()
        if not isinstance(value, list):
            self._json_fail_hard()
        result = []
        for obj in value:
            if not isinstance(obj, dict):
                self._json_fail_hard()
            if (
                'property_name' not in obj or
                'property_value' not in obj or
                'match_type' not in obj
            ):
                self._json_fail_hard()
            property_name = validate_case_property_name(obj['property_name'],
                allow_parent_case_references=self.allow_parent_case_references)
            match_type = obj['match_type']
            if match_type not in MatchPropertyDefinition.MATCH_CHOICES:
                self._json_fail_hard()
            if match_type in (
                MatchPropertyDefinition.MATCH_HAS_VALUE,
                MatchPropertyDefinition.MATCH_HAS_NO_VALUE,
            ):
                # Presence checks have no comparison value.
                result.append({
                    'property_name': property_name,
                    'property_value': None,
                    'match_type': match_type,
                })
            elif match_type in (
                MatchPropertyDefinition.MATCH_EQUAL,
                MatchPropertyDefinition.MATCH_NOT_EQUAL,
            ):
                property_value = validate_case_property_value(obj['property_value'])
                result.append({
                    'property_name': property_name,
                    'property_value': property_value,
                    'match_type': match_type,
                })
            elif match_type in (
                MatchPropertyDefinition.MATCH_DAYS_BEFORE,
                MatchPropertyDefinition.MATCH_DAYS_AFTER,
            ):
                property_value = obj['property_value']
                try:
                    property_value = int(property_value)
                except (TypeError, ValueError):
                    raise ValidationError(_("Please enter a number of days"))
                # Stored as a string like the other match values.
                result.append({
                    'property_name': property_name,
                    'property_value': str(property_value),
                    'match_type': match_type,
                })
            elif match_type == MatchPropertyDefinition.MATCH_REGEX:
                property_value = obj['property_value']
                if not property_value:
                    raise ValidationError(_("Please enter a valid regular expression to match"))
                try:
                    re.compile(property_value)
                except (re.error, ValueError, TypeError):
                    raise ValidationError(_("Please enter a valid regular expression to match"))
                result.append({
                    'property_name': property_name,
                    'property_value': property_value,
                    'match_type': match_type,
                })
        return result
    def clean_filter_on_closed_parent(self):
        # Hidden field posts 'true'/'false' strings; coerce to bool.
        return true_or_false(self.cleaned_data.get('filter_on_closed_parent'))
    def clean_location_filter_definition(self):
        """Parse the location filter JSON. The widget posts a list; only the
        first entry is used. Returns a dict or '' when no filter is set."""
        value = self.cleaned_data.get('location_filter_definition')
        try:
            if value:
                value = json.loads(value)
            else:
                return ''
        except (TypeError, ValueError):
            self._json_fail_hard()
        if value:
            location_def = value[0]
            # Default include_child_locations to an explicit False.
            if not location_def.get('include_child_locations'):
                location_def['include_child_locations'] = False
            return location_def
        return ''
    def clean_ucr_filter_definitions(self):
        """Validate each UCR filter definition by actually building the
        filter from its JSON spec; surface spec errors as form errors."""
        value = self.cleaned_data.get('ucr_filter_definitions')
        if not isinstance(value, list):
            self._json_fail_hard()
        result = []
        for obj in value:
            if not isinstance(obj, dict):
                self._json_fail_hard()
            try:
                spec = json.loads(obj['configured_filter'])
            except (TypeError, ValueError):
                self._json_fail_hard()
            try:
                FilterFactory.from_spec(spec, FactoryContext.empty(domain=self.domain))
            except BadSpecError as error:
                message = _("There was a problem with a UCR Filter Definition: ")
                raise ValidationError(message + str(error))
            result.append(obj)
        return result
    def save_criteria(self, rule, save_meta=True):
        """Persist the cleaned criteria onto ``rule``.
        Deletes all existing criteria rows and recreates them from
        cleaned_data inside a single transaction. When ``save_meta`` is
        False the rule's own fields (case_type, operator, server-modified
        filter) are left untouched and the rule is not re-saved.
        Assumes full_clean() has already run.
        """
        with transaction.atomic():
            if save_meta:
                rule.case_type = self.cleaned_data['case_type']
                rule.criteria_operator = self.cleaned_data['criteria_operator']
                rule.filter_on_server_modified = self.cleaned_data['filter_on_server_modified']
                rule.server_modified_boundary = self.cleaned_data['server_modified_boundary']
                rule.save()
            rule.delete_criteria()
            for item in self.cleaned_data['property_match_definitions']:
                definition = MatchPropertyDefinition.objects.create(
                    property_name=item['property_name'],
                    property_value=item['property_value'],
                    match_type=item['match_type'],
                )
                criteria = CaseRuleCriteria(rule=rule)
                criteria.definition = definition
                criteria.save()
            for item in self.cleaned_data['custom_match_definitions']:
                definition = CustomMatchDefinition.objects.create(
                    name=item['name'],
                )
                criteria = CaseRuleCriteria(rule=rule)
                criteria.definition = definition
                criteria.save()
            for item in self.cleaned_data['ucr_filter_definitions']:
                definition = UCRFilterDefinition.objects.create(
                    configured_filter=item['configured_filter']
                )
                criteria = CaseRuleCriteria(rule=rule)
                criteria.definition = definition
                criteria.save()
            if self.cleaned_data['filter_on_closed_parent']:
                definition = ClosedParentDefinition.objects.create()
                criteria = CaseRuleCriteria(rule=rule)
                criteria.definition = definition
                criteria.save()
            if self.cleaned_data['location_filter_definition']:
                definition_data = self.cleaned_data['location_filter_definition']
                if definition_data and definition_data['location_id']:
                    definition = LocationFilterDefinition.objects.create(
                        location_id=definition_data['location_id'],
                        include_child_locations=definition_data.get('include_child_locations', False),
                    )
                    criteria = CaseRuleCriteria(rule=rule)
                    criteria.definition = definition
                    criteria.save()
class CaseRuleActionsForm(forms.Form):
    """Edits the actions of an automatic case update rule.
    Actions (close case, update case properties, custom/code-defined
    actions) are round-tripped through hidden fields whose JSON values are
    maintained client-side by knockout; the clean_*() methods parse and
    validate that JSON, and save_actions() persists the result.
    """
    # Prefix to avoid name collisions; this means all input
    # names in the HTML are prefixed with "action-"
    prefix = "action"
    # Hidden fields whose (JSON or 'true'/'false') string values are managed
    # by the knockout UI; see hidden_bound_field() usage in __init__.
    close_case = forms.CharField(required=False, initial='false')
    properties_to_update = forms.CharField(required=False, initial='[]')
    custom_action_definitions = forms.CharField(required=False, initial='[]')

    @property
    def current_values(self):
        """Return the current (possibly unvalidated) field values, with the
        JSON-encoded fields decoded, for the client-side model."""
        return {
            'close_case': self['close_case'].value(),
            'properties_to_update': json.loads(self['properties_to_update'].value()),
            'custom_action_definitions': json.loads(self['custom_action_definitions'].value()),
        }

    def compute_initial(self, domain, rule):
        """Build initial form data from an existing rule's actions."""
        initial = {}
        custom_action_definitions = []
        for action in rule.memoized_actions:
            definition = action.definition
            if isinstance(definition, UpdateCaseDefinition):
                if definition.close_case:
                    initial['close_case'] = 'true'
                initial['properties_to_update'] = json.dumps(definition.properties_to_update)
            elif isinstance(definition, CustomActionDefinition):
                custom_action_definitions.append({
                    'name': definition.name,
                })
        initial['custom_action_definitions'] = json.dumps(custom_action_definitions)
        return initial

    def __init__(self, domain, *args, **kwargs):
        """
        :param domain: the domain (project space) the rule belongs to

        Consumes custom kwargs ``is_system_admin`` and ``rule`` before
        delegating to forms.Form; ``initial`` may not be passed directly
        because it is always computed from ``rule``.
        """
        if 'initial' in kwargs:
            raise ValueError(_("Initial values are set by the form."))
        self.is_system_admin = kwargs.pop('is_system_admin', False)
        rule = kwargs.pop('rule', None)
        if rule:
            kwargs['initial'] = self.compute_initial(domain, rule)
        super(CaseRuleActionsForm, self).__init__(*args, **kwargs)
        self.domain = domain
        # form_tag=False: this form is rendered inside a larger <form>
        # together with the rule/criteria forms. Errors are rendered by the
        # knockout template, not by crispy.
        self.helper = HQFormHelper()
        self.helper.form_tag = False
        self.helper.form_show_errors = False
        self.helper.layout = Layout(
            Fieldset(
                _("Actions"),
                hidden_bound_field('close_case', 'closeCase'),
                hidden_bound_field('properties_to_update', 'propertiesToUpdate'),
                hidden_bound_field('custom_action_definitions', 'customActionDefinitions'),
                Div(data_bind="template: {name: 'case-actions'}"),
                css_id="rule-actions",
            ),
        )
        # Names of the custom (code-defined) actions available for this
        # deployment, for display in the UI.
        self.custom_actions = settings.AVAILABLE_CUSTOM_RULE_ACTIONS.keys()

    @property
    def constants(self):
        """Value-type constants needed by the client-side knockout model."""
        return {
            'VALUE_TYPE_EXACT': UpdateCaseDefinition.VALUE_TYPE_EXACT,
            'VALUE_TYPE_CASE_PROPERTY': UpdateCaseDefinition.VALUE_TYPE_CASE_PROPERTY,
        }

    @property
    @memoized
    def requires_system_admin_to_edit(self):
        """True if the rule being edited uses custom (code-defined) actions,
        which only system admins may edit."""
        if 'custom_action_definitions' not in self.initial:
            return False
        custom_actions = json.loads(self.initial['custom_action_definitions'])
        return len(custom_actions) > 0

    @property
    @memoized
    def requires_system_admin_to_save(self):
        """True if the submitted data uses custom (code-defined) actions.
        Assumes full_clean() has already run."""
        return len(self.cleaned_data['custom_action_definitions']) > 0

    def _json_fail_hard(self):
        # Raise ValueError rather than ValidationError: malformed JSON here
        # means the client-side code is broken, not a user input mistake.
        # Message is wrapped in _() for consistency with
        # CaseRuleCriteriaForm._json_fail_hard().
        raise ValueError(_("Invalid JSON object given"))

    def clean_close_case(self):
        # Hidden field posts 'true'/'false' strings; coerce to bool.
        return true_or_false(self.cleaned_data.get('close_case'))

    def clean_properties_to_update(self):
        """Parse/validate the JSON list of case property updates.
        Each entry must carry name, value_type and value; value is validated
        as an exact value or as a source case property name depending on
        value_type. Returns UpdateCaseDefinition.PropertyDefinition objects.
        """
        data = self.cleaned_data.get('properties_to_update')
        try:
            data = json.loads(data)
        except (TypeError, ValueError):
            self._json_fail_hard()
        if not isinstance(data, list):
            self._json_fail_hard()
        result = []
        for obj in data:
            if not isinstance(obj, dict):
                self._json_fail_hard()
            if (
                'name' not in obj or
                'value_type' not in obj or
                'value' not in obj
            ):
                self._json_fail_hard()
            name = validate_case_property_name(obj['name'])
            value_type = obj['value_type']
            if value_type not in UpdateCaseDefinition.VALUE_TYPE_CHOICES:
                self._json_fail_hard()
            # Use a dedicated local for the item's value so we don't shadow
            # the list being iterated.
            if value_type == UpdateCaseDefinition.VALUE_TYPE_EXACT:
                value = validate_case_property_value(obj['value'])
            elif value_type == UpdateCaseDefinition.VALUE_TYPE_CASE_PROPERTY:
                value = validate_case_property_name(obj['value'])
            result.append(
                UpdateCaseDefinition.PropertyDefinition(
                    name=name,
                    value_type=value_type,
                    value=value,
                )
            )
        return result

    def clean_custom_action_definitions(self):
        """Parse/validate the JSON list of custom (code-defined) actions.
        Returns a list of {'name': str} dicts."""
        data = self.cleaned_data.get('custom_action_definitions')
        try:
            data = json.loads(data)
        except (TypeError, ValueError):
            self._json_fail_hard()
        if not isinstance(data, list):
            self._json_fail_hard()
        result = []
        for obj in data:
            if not isinstance(obj, dict):
                self._json_fail_hard()
            if 'name' not in obj:
                self._json_fail_hard()
            result.append({
                'name': obj['name'].strip()
            })
        return result

    def clean(self):
        """Cross-field validation: at least one action must be specified."""
        cleaned_data = super(CaseRuleActionsForm, self).clean()
        if (
            'close_case' in cleaned_data and
            'properties_to_update' in cleaned_data and
            'custom_action_definitions' in cleaned_data
        ):
            # All fields passed individual validation
            if (
                not cleaned_data['close_case'] and
                not cleaned_data['properties_to_update'] and
                not cleaned_data['custom_action_definitions']
            ):
                raise ValidationError(_("Please specify at least one action."))
        # Django convention: return the cleaned data.
        return cleaned_data

    def save_actions(self, rule):
        """Persist the cleaned actions onto ``rule``.
        Deletes all existing action rows and recreates them from
        cleaned_data inside a single transaction. Assumes full_clean()
        has already run.
        """
        with transaction.atomic():
            rule.delete_actions()
            if self.cleaned_data['close_case'] or self.cleaned_data['properties_to_update']:
                definition = UpdateCaseDefinition(close_case=self.cleaned_data['close_case'])
                definition.set_properties_to_update(self.cleaned_data['properties_to_update'])
                definition.save()
                action = CaseRuleAction(rule=rule)
                action.definition = definition
                action.save()
            for item in self.cleaned_data['custom_action_definitions']:
                definition = CustomActionDefinition.objects.create(
                    name=item['name'],
                )
                action = CaseRuleAction(rule=rule)
                action.definition = definition
                action.save()
class DedupeCaseFilterForm(CaseRuleCriteriaForm):
    """Restricted variant of CaseRuleCriteriaForm used for deduplication
    rules: only property matches and location filters are offered, and the
    other inherited criteria are forced to their neutral values by the
    clean_* overrides at the bottom.
    """
    prefix = 'case-filter'
    # Override: case type is optional for dedupe filters.
    case_type = forms.ChoiceField(
        label=gettext_lazy("Case Type"),
        required=False,
    )
    @property
    def fieldset_help_text(self):
        return _("The rule will be applied to all cases that match all filter criteria below.")
    @property
    def allow_case_modified_filter(self):
        return False
    @property
    def allow_case_property_filter(self):
        return True
    @property
    def allow_date_case_property_filter(self):
        return False
    @property
    def allow_locations_filter(self):
        return True
    @property
    def allow_parent_case_references(self):
        return False
    @property
    def allow_custom_filter(self):
        return False
    def __init__(self, domain, *args, **kwargs):
        """Derives is_system_admin from the couch_user kwarg, then replaces
        the parent's layout with one exposing only the dedupe-relevant
        hidden fields (property matches and location filter)."""
        couch_user = kwargs.get('couch_user', None)
        kwargs['is_system_admin'] = couch_user.is_superuser if couch_user else False
        super(DedupeCaseFilterForm, self).__init__(domain, *args, **kwargs)
        self.helper = HQFormHelper()
        self.helper.form_tag = False
        self.helper.layout = Layout(
            Fieldset(
                _("Cases Filter") if self.show_fieldset_title else "",
                HTML(
                    '<p class="help-block alert alert-info"><i class="fa fa-info-circle"></i> %s</p>'
                    % self.fieldset_help_text
                ),
                hidden_bound_field('property_match_definitions', 'propertyMatchDefinitions'),
                hidden_bound_field('location_filter_definition', 'locationFilterDefinition'),
                Div(data_bind="template: {name: 'case-filters'}"),
                css_id="rule-criteria-panel",
            ),
        )
        # The parent's "Rule Criteria" beginning section is not used here.
        self.form_beginning_helper = None
    # Neutralize the inherited criteria that dedupe rules don't support.
    def clean_filter_on_server_modified(self):
        return False
    def clean_server_modified_boundary(self):
        return None
    def clean_custom_match_definitions(self):
        return []
    def clean_filter_on_closed_parent(self):
        return False
    def clean_ucr_filter_definitions(self):
        return []
|
{
"content_hash": "fc17233911842e4a5150bd35c8053575",
"timestamp": "",
"source": "github",
"line_count": 994,
"max_line_length": 127,
"avg_line_length": 35.145875251509054,
"alnum_prop": 0.588178044940604,
"repo_name": "dimagi/commcare-hq",
"id": "6a0a067ed6178efaa3360f25c887d50cffb09202",
"size": "34935",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/data_interfaces/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
}
|
# Build script: compiles cyintegrate.pyx into a C extension module with
# Cython. Typical invocation: python setup.py build_ext --inplace
from setuptools import setup
from Cython.Build import cythonize
import Cython.Compiler.Options
# Emit an HTML annotation file alongside the generated C, showing line by
# line where the code falls back to the Python C API.
Cython.Compiler.Options.annotate = True
setup(
    name="cyintegrate",
    # embedsignature=True embeds each function's signature into its
    # docstring so it survives compilation (visible via help()).
    ext_modules=cythonize('cyintegrate.pyx', compiler_directives={'embedsignature': True}),
)
|
{
"content_hash": "a51580fe2e65c169565895c7ea800db9",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 91,
"avg_line_length": 26.2,
"alnum_prop": 0.7786259541984732,
"repo_name": "tleonhardt/Python_Interface_Cpp",
"id": "7546d6c9e4874ab80f9d1fdd74c29d67d7c98883",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cython/integrate/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "72"
},
{
"name": "C",
"bytes": "32152"
},
{
"name": "C++",
"bytes": "12471"
},
{
"name": "CMake",
"bytes": "524"
},
{
"name": "Python",
"bytes": "32849"
},
{
"name": "Ruby",
"bytes": "296"
},
{
"name": "Shell",
"bytes": "1963"
}
],
"symlink_target": ""
}
|
import posix
from nova import test
from nova.virt.docker import hostinfo
class HostInfoTestCase(test.NoDBTestCase):
    """Tests for nova.virt.docker.hostinfo using canned meminfo and statvfs
    data instead of touching the real host.
    """

    def setUp(self):
        super(HostInfoTestCase, self).setUp()
        # Stub the module-level helpers with canned data. Save the real
        # attributes and restore them after the test so the stubs do not
        # leak into any other test case that imports hostinfo.
        self._real_get_meminfo = hostinfo.get_meminfo
        self._real_statvfs = hostinfo.statvfs
        self.addCleanup(self._restore_hostinfo)
        hostinfo.get_meminfo = self.get_meminfo
        hostinfo.statvfs = self.statvfs

    def _restore_hostinfo(self):
        # Undo the monkey-patching performed in setUp().
        hostinfo.get_meminfo = self._real_get_meminfo
        hostinfo.statvfs = self._real_statvfs

    def get_meminfo(self):
        # Canned /proc/meminfo-style lines (values in kB).
        data = ['MemTotal: 1018784 kB\n',
                'MemFree: 220060 kB\n',
                'Buffers: 21640 kB\n',
                'Cached: 63364 kB\n']
        return data

    def statvfs(self):
        # Canned os.statvfs() result: (bsize, frsize, blocks, bfree,
        # bavail, files, ffree, favail, flag, namemax).
        seq = (4096, 4096, 10047582, 7332259, 6820195,
               2564096, 2271310, 2271310, 1024, 255)
        return posix.statvfs_result(sequence=seq)

    def test_get_disk_usage(self):
        # total = frsize * blocks; available = frsize * bavail.
        disk_usage = hostinfo.get_disk_usage()
        self.assertEqual(disk_usage['total'], 41154895872)
        self.assertEqual(disk_usage['available'], 27935518720)
        self.assertEqual(disk_usage['used'], 11121963008)

    def test_parse_meminfo(self):
        # Expected values are the canned kB figures converted to bytes.
        meminfo = hostinfo.parse_meminfo()
        self.assertEqual(meminfo['memtotal'], 1043234816)
        self.assertEqual(meminfo['memfree'], 225341440)
        self.assertEqual(meminfo['cached'], 64884736)
        self.assertEqual(meminfo['buffers'], 22159360)

    def test_get_memory_usage(self):
        usage = hostinfo.get_memory_usage()
        self.assertEqual(usage['total'], 1043234816)
        self.assertEqual(usage['used'], 730849280)
        self.assertEqual(usage['free'], 312385536)
|
{
"content_hash": "1224ca4a84d2a8dc80008cb9ce95cc30",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 62,
"avg_line_length": 34.93023255813954,
"alnum_prop": 0.6198402130492676,
"repo_name": "TieWei/nova",
"id": "c66768896da174a93089a9be9f9cb6f945f3a432",
"size": "2182",
"binary": false,
"copies": "11",
"ref": "refs/heads/enhanced/havana",
"path": "nova/tests/virt/docker/test_hostinfo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13405036"
},
{
"name": "Shell",
"bytes": "17194"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from distutils import dir_util
from pytest import fixture
import os
@fixture
def datadir(tmpdir, request):
    """
    Fixture that locates a data directory named after the requesting test
    module (the module's path with its extension stripped) and, when that
    directory exists, copies its full contents into ``tmpdir`` so tests can
    read and modify the files without touching the originals.
    """
    module_path = request.module.__file__
    candidate_dir = os.path.splitext(module_path)[0]
    if os.path.isdir(candidate_dir):
        dir_util.copy_tree(candidate_dir, bytes(tmpdir))
    return tmpdir
|
{
"content_hash": "f4500d722d5aba97bc9aa4921314140a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 77,
"avg_line_length": 26.8,
"alnum_prop": 0.7014925373134329,
"repo_name": "SirEdvin/Pandas-Pipe",
"id": "3c97a0fed655933de8243160a9422cbf560c6553",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6998"
},
{
"name": "Makefile",
"bytes": "7434"
},
{
"name": "Python",
"bytes": "45117"
}
],
"symlink_target": ""
}
|
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class EnvelopeTemplateResults(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'end_position': 'str',
'envelope_templates': 'list[EnvelopeTemplate]',
'folders': 'list[Folder]',
'next_uri': 'str',
'previous_uri': 'str',
'result_set_size': 'str',
'start_position': 'str',
'total_set_size': 'str'
}
attribute_map = {
'end_position': 'endPosition',
'envelope_templates': 'envelopeTemplates',
'folders': 'folders',
'next_uri': 'nextUri',
'previous_uri': 'previousUri',
'result_set_size': 'resultSetSize',
'start_position': 'startPosition',
'total_set_size': 'totalSetSize'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""EnvelopeTemplateResults - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._end_position = None
self._envelope_templates = None
self._folders = None
self._next_uri = None
self._previous_uri = None
self._result_set_size = None
self._start_position = None
self._total_set_size = None
self.discriminator = None
setattr(self, "_{}".format('end_position'), kwargs.get('end_position', None))
setattr(self, "_{}".format('envelope_templates'), kwargs.get('envelope_templates', None))
setattr(self, "_{}".format('folders'), kwargs.get('folders', None))
setattr(self, "_{}".format('next_uri'), kwargs.get('next_uri', None))
setattr(self, "_{}".format('previous_uri'), kwargs.get('previous_uri', None))
setattr(self, "_{}".format('result_set_size'), kwargs.get('result_set_size', None))
setattr(self, "_{}".format('start_position'), kwargs.get('start_position', None))
setattr(self, "_{}".format('total_set_size'), kwargs.get('total_set_size', None))
@property
def end_position(self):
"""Gets the end_position of this EnvelopeTemplateResults. # noqa: E501
The last position in the result set. # noqa: E501
:return: The end_position of this EnvelopeTemplateResults. # noqa: E501
:rtype: str
"""
return self._end_position
@end_position.setter
def end_position(self, end_position):
"""Sets the end_position of this EnvelopeTemplateResults.
The last position in the result set. # noqa: E501
:param end_position: The end_position of this EnvelopeTemplateResults. # noqa: E501
:type: str
"""
self._end_position = end_position
@property
def envelope_templates(self):
"""Gets the envelope_templates of this EnvelopeTemplateResults. # noqa: E501
The list of requested templates. # noqa: E501
:return: The envelope_templates of this EnvelopeTemplateResults. # noqa: E501
:rtype: list[EnvelopeTemplate]
"""
return self._envelope_templates
@envelope_templates.setter
def envelope_templates(self, envelope_templates):
"""Sets the envelope_templates of this EnvelopeTemplateResults.
The list of requested templates. # noqa: E501
:param envelope_templates: The envelope_templates of this EnvelopeTemplateResults. # noqa: E501
:type: list[EnvelopeTemplate]
"""
self._envelope_templates = envelope_templates
@property
def folders(self):
"""Gets the folders of this EnvelopeTemplateResults. # noqa: E501
# noqa: E501
:return: The folders of this EnvelopeTemplateResults. # noqa: E501
:rtype: list[Folder]
"""
return self._folders
@folders.setter
def folders(self, folders):
"""Sets the folders of this EnvelopeTemplateResults.
# noqa: E501
:param folders: The folders of this EnvelopeTemplateResults. # noqa: E501
:type: list[Folder]
"""
self._folders = folders
@property
def next_uri(self):
"""Gets the next_uri of this EnvelopeTemplateResults. # noqa: E501
The URI to the next chunk of records based on the search request. If the endPosition is the entire results of the search, this is null. # noqa: E501
:return: The next_uri of this EnvelopeTemplateResults. # noqa: E501
:rtype: str
"""
return self._next_uri
@next_uri.setter
def next_uri(self, next_uri):
"""Sets the next_uri of this EnvelopeTemplateResults.
The URI to the next chunk of records based on the search request. If the endPosition is the entire results of the search, this is null. # noqa: E501
:param next_uri: The next_uri of this EnvelopeTemplateResults. # noqa: E501
:type: str
"""
self._next_uri = next_uri
@property
def previous_uri(self):
"""Gets the previous_uri of this EnvelopeTemplateResults. # noqa: E501
The postal code for the billing address. # noqa: E501
:return: The previous_uri of this EnvelopeTemplateResults. # noqa: E501
:rtype: str
"""
return self._previous_uri
@previous_uri.setter
def previous_uri(self, previous_uri):
"""Sets the previous_uri of this EnvelopeTemplateResults.
The postal code for the billing address. # noqa: E501
:param previous_uri: The previous_uri of this EnvelopeTemplateResults. # noqa: E501
:type: str
"""
self._previous_uri = previous_uri
@property
def result_set_size(self):
"""Gets the result_set_size of this EnvelopeTemplateResults. # noqa: E501
The number of results returned in this response. # noqa: E501
:return: The result_set_size of this EnvelopeTemplateResults. # noqa: E501
:rtype: str
"""
return self._result_set_size
@result_set_size.setter
def result_set_size(self, result_set_size):
"""Sets the result_set_size of this EnvelopeTemplateResults.
The number of results returned in this response. # noqa: E501
:param result_set_size: The result_set_size of this EnvelopeTemplateResults. # noqa: E501
:type: str
"""
self._result_set_size = result_set_size
@property
def start_position(self):
"""Gets the start_position of this EnvelopeTemplateResults. # noqa: E501
Starting position of the current result set. # noqa: E501
:return: The start_position of this EnvelopeTemplateResults. # noqa: E501
:rtype: str
"""
return self._start_position
@start_position.setter
def start_position(self, start_position):
"""Sets the start_position of this EnvelopeTemplateResults.
Starting position of the current result set. # noqa: E501
:param start_position: The start_position of this EnvelopeTemplateResults. # noqa: E501
:type: str
"""
self._start_position = start_position
@property
def total_set_size(self):
"""Gets the total_set_size of this EnvelopeTemplateResults. # noqa: E501
The total number of items available in the result set. This will always be greater than or equal to the value of the property returning the results in the in the response. # noqa: E501
:return: The total_set_size of this EnvelopeTemplateResults. # noqa: E501
:rtype: str
"""
return self._total_set_size
@total_set_size.setter
def total_set_size(self, total_set_size):
"""Sets the total_set_size of this EnvelopeTemplateResults.
The total number of items available in the result set. This will always be greater than or equal to the value of the property returning the results in the in the response. # noqa: E501
:param total_set_size: The total_set_size of this EnvelopeTemplateResults. # noqa: E501
:type: str
"""
self._total_set_size = total_set_size
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EnvelopeTemplateResults, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        # Delegates to to_str() so repr and str output stay consistent.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EnvelopeTemplateResults):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, EnvelopeTemplateResults):
return True
return self.to_dict() != other.to_dict()
|
{
"content_hash": "edbb5fdfa40288eef6b7ab4898a7f3c7",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 193,
"avg_line_length": 33.79421221864952,
"alnum_prop": 0.6113225499524263,
"repo_name": "docusign/docusign-python-client",
"id": "3f33601cf7b6d779cb28bad2fffc6fffcd49ae94",
"size": "10527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docusign_esign/models/envelope_template_results.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9687716"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from .bspecs import Bspecs
from .monte import Monte
from .eci import ECI
from .helpers import *
from . import monte
from . import ECI
from . import properties
|
{
"content_hash": "db63d7f904c2bf637495187dee4ee8ca",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 38,
"avg_line_length": 22.11111111111111,
"alnum_prop": 0.7688442211055276,
"repo_name": "goirijo/thermoplotting",
"id": "a272f93252b3b5291728c772103d4f165c98e611",
"size": "199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thermoplotting/casmfiles/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "7184"
},
{
"name": "Python",
"bytes": "242140"
},
{
"name": "Shell",
"bytes": "2264"
}
],
"symlink_target": ""
}
|
from rlkit.policies.base import Policy
class RandomPolicy(Policy):
    """
    Policy that ignores the observation and returns a uniformly random
    action sampled from the action space.

    NOTE(review): the original docstring said "always outputs zero", but
    ``get_action`` actually calls ``action_space.sample()`` — fixed here.
    """
    def __init__(self, action_space):
        # Space whose sample() method supplies the random actions.
        self.action_space = action_space
    def get_action(self, obs):
        # obs is ignored; returns (action, agent_info) with an empty info dict.
        return self.action_space.sample(), {}
|
{
"content_hash": "29174fb83ea728525c8cb8c45edb7ee6",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 45,
"avg_line_length": 21.53846153846154,
"alnum_prop": 0.6321428571428571,
"repo_name": "google-research/DBAP-algorithm",
"id": "ef9ef71935290e1b329831d2a77d2bc0a915da76",
"size": "856",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "third_party/rlkit_library/rlkit/policies/simple.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5294"
}
],
"symlink_target": ""
}
|
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.dns.types import RecordType, ZoneDoesNotExistError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.drivers.hostvirtual import HostVirtualDNSDriver
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_HOSTVIRTUAL
class HostVirtualTests(unittest.TestCase):
    """Unit tests for HostVirtualDNSDriver backed by canned HTTP fixtures.

    The driver's connection class is swapped for HostVirtualMockHttp in
    setUp, so every test runs against fixture JSON rather than the live
    API.  Setting ``HostVirtualMockHttp.type`` selects the error-path
    fixture methods (e.g. ZONE_DOES_NOT_EXIST).
    """

    def setUp(self):
        HostVirtualDNSDriver.connectionCls.conn_classes = (
            None, HostVirtualMockHttp)
        # Reset the mock "type" so each test starts from the success fixtures.
        HostVirtualMockHttp.type = None
        self.driver = HostVirtualDNSDriver(*DNS_PARAMS_HOSTVIRTUAL)

    def test_list_record_types(self):
        record_types = self.driver.list_record_types()
        self.assertEqual(len(record_types), 7)
        self.assertTrue(RecordType.A in record_types)

    def test_list_zones(self):
        zones = self.driver.list_zones()
        self.assertEqual(len(zones), 5)

        # Expected values come from the list_zones.json fixture.
        zone = zones[0]
        self.assertEqual(zone.id, '47234')
        self.assertEqual(zone.type, 'master')
        self.assertEqual(zone.domain, 't.com')
        self.assertEqual(zone.ttl, '3600')

    def test_list_records(self):
        zone = self.driver.list_zones()[0]
        records = self.driver.list_records(zone=zone)
        self.assertEqual(len(records), 3)

        # Expected values come from the list_records.json fixture.
        record = records[1]
        self.assertEqual(record.name, 'www.t.com')
        self.assertEqual(record.id, '300719')
        self.assertEqual(record.type, RecordType.A)
        self.assertEqual(record.data, '208.111.35.173')

    def test_get_zone(self):
        zone = self.driver.get_zone(zone_id='47234')
        self.assertEqual(zone.id, '47234')
        self.assertEqual(zone.type, 'master')
        self.assertEqual(zone.domain, 't.com')
        self.assertEqual(zone.ttl, '3600')

    def test_get_record(self):
        record = self.driver.get_record(zone_id='47234', record_id='300377')
        self.assertEqual(record.id, '300377')
        self.assertEqual(record.name, '*.t.com')
        self.assertEqual(record.type, RecordType.CNAME)
        self.assertEqual(record.data, 't.com')

    def test_list_records_zone_does_not_exist(self):
        zone = self.driver.list_zones()[0]

        # Switch the mock into its 404 error mode for the next request.
        HostVirtualMockHttp.type = 'ZONE_DOES_NOT_EXIST'

        try:
            self.driver.list_records(zone=zone)
        except ZoneDoesNotExistError:
            # sys.exc_info() keeps this compatible with Python 2.5-era style.
            e = sys.exc_info()[1]
            self.assertEqual(e.zone_id, zone.id)
        else:
            self.fail('Exception was not thrown')

    def test_get_zone_does_not_exist(self):
        HostVirtualMockHttp.type = 'ZONE_DOES_NOT_EXIST'

        try:
            self.driver.get_zone(zone_id='4444')
        except ZoneDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.zone_id, '4444')
        else:
            self.fail('Exception was not thrown')

    def test_get_record_zone_does_not_exist(self):
        HostVirtualMockHttp.type = 'ZONE_DOES_NOT_EXIST'

        try:
            self.driver.get_record(zone_id='4444', record_id='28536')
        except ZoneDoesNotExistError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_get_record_record_does_not_exist(self):
        HostVirtualMockHttp.type = 'RECORD_DOES_NOT_EXIST'

        try:
            self.driver.get_record(zone_id='47234', record_id='4444')
        except RecordDoesNotExistError:
            pass
        else:
            self.fail('Exception was not thrown')

    def test_create_zone(self):
        zone = self.driver.create_zone(domain='t.com', type='master',
                                       ttl=None, extra=None)
        self.assertEqual(zone.id, '47234')
        self.assertEqual(zone.domain, 't.com')

    def test_update_zone(self):
        zone = self.driver.list_zones()[0]
        updated_zone = self.driver.update_zone(zone=zone, domain='tt.com')

        self.assertEqual(updated_zone.id, zone.id)
        self.assertEqual(updated_zone.domain, 'tt.com')
        self.assertEqual(updated_zone.type, zone.type)
        self.assertEqual(updated_zone.ttl, '3600')

    def test_create_record(self):
        zone = self.driver.list_zones()[0]
        record = self.driver.create_record(
            name='www', zone=zone,
            type=RecordType.A, data='127.0.0.1'
        )

        self.assertEqual(record.id, '300377')
        self.assertEqual(record.name, 'www')
        self.assertEqual(record.zone, zone)
        self.assertEqual(record.type, RecordType.A)
        self.assertEqual(record.data, '127.0.0.1')

    def test_update_record(self):
        zone = self.driver.list_zones()[0]
        record = self.driver.list_records(zone=zone)[1]
        updated_record = self.driver.update_record(record=record, name='www',
                                                   type=RecordType.AAAA,
                                                   data='::1')

        # The original record object must be left untouched by the update.
        self.assertEqual(record.data, '208.111.35.173')

        self.assertEqual(updated_record.id, record.id)
        self.assertEqual(updated_record.name, 'www')
        self.assertEqual(updated_record.zone, record.zone)
        self.assertEqual(updated_record.type, RecordType.AAAA)
        self.assertEqual(updated_record.data, '::1')

    def test_delete_zone(self):
        zone = self.driver.list_zones()[0]
        status = self.driver.delete_zone(zone=zone)
        self.assertTrue(status)

    def test_delete_zone_does_not_exist(self):
        zone = self.driver.list_zones()[0]

        HostVirtualMockHttp.type = 'ZONE_DOES_NOT_EXIST'

        try:
            self.driver.delete_zone(zone=zone)
        except ZoneDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.zone_id, zone.id)
        else:
            self.fail('Exception was not thrown')

    def test_delete_record(self):
        zone = self.driver.list_zones()[0]
        record = self.driver.list_records(zone=zone)[0]
        status = self.driver.delete_record(record=record)
        self.assertTrue(status)

    def test_delete_record_does_not_exist(self):
        zone = self.driver.list_zones()[0]
        record = self.driver.list_records(zone=zone)[0]

        HostVirtualMockHttp.type = 'RECORD_DOES_NOT_EXIST'

        try:
            self.driver.delete_record(record=record)
        except RecordDoesNotExistError:
            e = sys.exc_info()[1]
            self.assertEqual(e.record_id, record.id)
        else:
            self.fail('Exception was not thrown')
class HostVirtualMockHttp(MockHttp):
    """Canned HTTP responses for the HostVirtual DNS driver tests.

    MockHttp dispatches each request to a method named after the URL path
    (plus the active ``type`` suffix), so the method names below are part
    of the mock's interface and must not change.
    """

    fixtures = DNSFileFixtures('hostvirtual')

    def _ok_response(self, fixture_name):
        # 200 OK whose body is loaded from the named fixture file.
        body = self.fixtures.load(fixture_name)
        return (httplib.OK, body, {}, httplib.responses[httplib.OK])

    def _not_found_response(self):
        # 404 with the canned "zone does not exist" payload.
        body = self.fixtures.load('zone_does_not_exist.json')
        return (httplib.NOT_FOUND, body,
                {}, httplib.responses[httplib.NOT_FOUND])

    def _vapi_dns_zone(self, method, url, body, headers):
        return self._ok_response('get_zone.json')

    def _vapi_dns_zones(self, method, url, body, headers):
        return self._ok_response('list_zones.json')

    def _vapi_dns_record(self, method, url, body, headers):
        return self._ok_response('get_record.json')

    def _vapi_dns_records(self, method, url, body, headers):
        return self._ok_response('list_records.json')

    def _vapi_dns_zone_ZONE_DOES_NOT_EXIST(self, method, url, body, headers):
        return self._not_found_response()

    def _vapi_dns_zone_RECORD_DOES_NOT_EXIST(self, method, url, body, headers):
        # Zone lookup still succeeds; only the record is missing.
        return self._ok_response('get_zone.json')

    def _vapi_dns_zones_ZONE_DOES_NOT_EXIST(self, method, url, body, headers):
        return self._not_found_response()

    def _vapi_dns_record_ZONE_DOES_NOT_EXIST(self, method,
                                             url, body, headers):
        return self._not_found_response()

    def _vapi_dns_record_RECORD_DOES_NOT_EXIST(self, method,
                                               url, body, headers):
        return self._not_found_response()

    def _vapi_dns_records_ZONE_DOES_NOT_EXIST(self, method,
                                              url, body, headers):
        return self._not_found_response()

    def _vapi_dns_zones_RECORD_DOES_NOT_EXIST(self, method,
                                              url, body, headers):
        return self._not_found_response()
if __name__ == '__main__':
    # Running the module directly executes the test suite; the process exit
    # status reflects the test results.
    sys.exit(unittest.main())
|
{
"content_hash": "aeb2aef5ac1b58d8447e7968370e9c24",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 79,
"avg_line_length": 38.111111111111114,
"alnum_prop": 0.6111651009610193,
"repo_name": "Jc2k/libcloud",
"id": "3ff404f870739d0a910c1df131064b0ea78aacde",
"size": "10043",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "libcloud/test/dns/test_hostvirtual.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2274647"
},
{
"name": "Shell",
"bytes": "13009"
}
],
"symlink_target": ""
}
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
# Generated protocol buffer code (protoc output).  Do not edit by hand;
# regenerate from use_item_gym_message.proto instead.
_sym_db = _symbol_database.Default()


from pogoprotos.inventory.item import item_id_pb2 as pogoprotos_dot_inventory_dot_item_dot_item__id__pb2


# File-level descriptor; serialized_pb is the compiled .proto definition and
# must stay byte-identical to the protoc output.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='pogoprotos/networking/requests/messages/use_item_gym_message.proto',
  package='pogoprotos.networking.requests.messages',
  syntax='proto3',
  serialized_pb=_b('\nBpogoprotos/networking/requests/messages/use_item_gym_message.proto\x12\'pogoprotos.networking.requests.messages\x1a\'pogoprotos/inventory/item/item_id.proto\"\x8a\x01\n\x11UseItemGymMessage\x12\x32\n\x07item_id\x18\x01 \x01(\x0e\x32!.pogoprotos.inventory.item.ItemId\x12\x0e\n\x06gym_id\x18\x02 \x01(\t\x12\x17\n\x0fplayer_latitude\x18\x03 \x01(\x01\x12\x18\n\x10player_longitude\x18\x04 \x01(\x01\x62\x06proto3')
  ,
  dependencies=[pogoprotos_dot_inventory_dot_item_dot_item__id__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)


# Message descriptor for UseItemGymMessage: enum item_id, string gym_id,
# and two double fields for the player's coordinates.
_USEITEMGYMMESSAGE = _descriptor.Descriptor(
  name='UseItemGymMessage',
  full_name='pogoprotos.networking.requests.messages.UseItemGymMessage',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='item_id', full_name='pogoprotos.networking.requests.messages.UseItemGymMessage.item_id', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='gym_id', full_name='pogoprotos.networking.requests.messages.UseItemGymMessage.gym_id', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='player_latitude', full_name='pogoprotos.networking.requests.messages.UseItemGymMessage.player_latitude', index=2,
      number=3, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='player_longitude', full_name='pogoprotos.networking.requests.messages.UseItemGymMessage.player_longitude', index=3,
      number=4, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=float(0),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=153,
  serialized_end=291,
)

# Link the enum field to the imported ItemId enum descriptor.
_USEITEMGYMMESSAGE.fields_by_name['item_id'].enum_type = pogoprotos_dot_inventory_dot_item_dot_item__id__pb2._ITEMID
DESCRIPTOR.message_types_by_name['UseItemGymMessage'] = _USEITEMGYMMESSAGE

# Concrete message class is built via reflection from the descriptor.
UseItemGymMessage = _reflection.GeneratedProtocolMessageType('UseItemGymMessage', (_message.Message,), dict(
  DESCRIPTOR = _USEITEMGYMMESSAGE,
  __module__ = 'pogoprotos.networking.requests.messages.use_item_gym_message_pb2'
  # @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.UseItemGymMessage)
  ))
_sym_db.RegisterMessage(UseItemGymMessage)


# @@protoc_insertion_point(module_scope)
|
{
"content_hash": "fdb50e390bf46492ad35e18a07ee43c2",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 436,
"avg_line_length": 42.98888888888889,
"alnum_prop": 0.7428276040320496,
"repo_name": "bellowsj/aiopogo",
"id": "fbc49167ec272977634bbd5b256a40ad17067b9d",
"size": "4006",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiopogo/pogoprotos/networking/requests/messages/use_item_gym_message_pb2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62068"
}
],
"symlink_target": ""
}
|
__author__ = 'Bryan Gregory'
__email__ = 'bryan.gregory1@gmail.com'
__date__ = '12-24-2013'
#Internal modules
import utils
#Start logger to record all info, warnings, and errors to Logs/logfile.log
log = utils.start_logging(__name__)
import munge
import train
import data_io
import features
import ensembles
#External modules
import sys
import pandas as pd
from datetime import datetime
def main():
    """Entry point: load data, build/refresh segment models, and export
    cross-validation and test-set predictions.

    Behavior is driven by SETTINGS.json (program settings) plus per-model
    settings.  Fixes applied in review:

    * ``(2013, 04, 1)`` / ``(2013, 02, 21)``: leading-zero integer literals
      are a syntax error on Python 3; rewritten as ``4`` / ``2`` (same
      values on Python 2).
    * Fresh runs crashed: with no cached models, ``segment_models`` is an
      empty list and ``segment_models[index] = new_model`` raised
      ``IndexError``; new models are now appended when the slot is missing.
    * The cache-miss loop wrote ``model['use_cached_model']`` where
      ``model`` is a dict *key* (a string) — a ``TypeError``; it now writes
      ``model_settings[model]['use_cached_model']``.
    * ``segment_models == None`` replaced with ``is None``.
    * Removed-in-pandas-1.0 ``.ix`` indexing replaced with label-based
      equivalents.
    """
    log.info('********New program instance started********')

    #-------------Load Environment----------------------#
    #Get program settings and model settings from SETTINGS.json file in root directory
    settings, model_settings = utils.load_settings()

    #If not using cached data, then load raw data, clean/munge it, create hand-crafted features, slice it for CV
    if settings['use_cached_data'] == 'y':
        log.info('==========LOADING CACHED FEATURES===========')
        dfTrn = data_io.load_cached_object('dfTrn')
        dfTest = data_io.load_cached_object('dfTest')
        dfCV = data_io.load_flatfile_to_df('Data/CV.csv')
    else:
        #-------Data Loading/Cleaning/Munging------------#
        #Load the data
        log.info('===============LOADING DATA=================')
        dfTrn = data_io.load_flatfile_to_df(settings['file_data_train'])
        dfTest = data_io.load_flatfile_to_df(settings['file_data_test'])
        dfCV = data_io.load_flatfile_to_df('Data/CV.csv')
        #Clean/Munge the data
        log.info('=======CLEANING AND MUNGING DATA============')
        dfTrn = munge.clean(dfTrn)
        dfTest = munge.clean(dfTest)
        #-------Feature creation-------------------------#
        #Add all currently used hand crafted features to dataframes
        log.info('====CREATING HAND-CRAFTED DATA FEATURES=====')
        features.add(dfTrn)
        features.add(dfTest)

    #---------Data slicing/parsing--------------------------#
    #Split data for CV
    if settings['generate_cv_score'] == 'y':
        log.info('=====SPLITTING DATA FOR CROSS-VALIDATION====')
        if settings['cv_method'] == 'april':
            # (2013, 4, 1) -- leading-zero literals (04) break Python 3.
            dfTrnCV, dfTestCV = munge.temporal_split(dfTrn, (2013, 4, 1))
        elif settings['cv_method'] == 'march':
            #take an addtional week from February b/c of lack of remote_api source issues in March
            dfTrnCV, dfTestCV = munge.temporal_split(dfTrn, (2013, 2, 21))
        elif settings['cv_method'] == 'list_split':
            #load stored list of data points and use those for CV
            dfCVlist = pd.DataFrame({'id': data_io.load_cached_object('Cache/cv_issue_ids.pkl'), 'dummy': 0})
            dfTrnCV, dfTestCV = munge.list_split(dfTrn, dfCVlist)

    #--------------Modeling-------------------------#
    #If cached models exist then load them for reuse into segment_models. Then run through model_settings and for
    # each model where 'use_cached_model' is false then clear the cached model and recreate it fresh
    log.info('=========LOADING CACHED MODELS==============')
    segment_models = data_io.load_cached_object('segment_models')
    if segment_models is None:
        log.info('=========CACHED MODELS NOT LOADED===========')
        # No cache: every model must be (re)built from its settings.
        for model in model_settings:
            model_settings[model]['use_cached_model'] = 'n'
        segment_models = []

    #Initialize new model for models not set to use cache
    log.info('=======INITIALIZING UN-CACHED MODELS========')
    index = 0
    for model in model_settings:
        if model_settings[model]['use_cached_model'] == 'n':
            new_model = ensembles.Model(model_name=model, target=model_settings[model]['target'],
                                        segment=model_settings[model]['segment'],
                                        estimator_class=model_settings[model]['estimator_class'],
                                        estimator_params=model_settings[model]['estimator_params'],
                                        features=model_settings[model]['features'],
                                        postprocess_scalar=model_settings[model]['postprocess_scalar'])
            #Flag the model as not cached, so that it does not get skipped when running the modeling process
            new_model.use_cached_model = 'n'
            #Project specific model attributes not part of base class
            new_model.KNN_neighborhood_threshold = model_settings[model]['KNN_neighborhood_threshold']
            new_model.sub_zip_neighborhood = model_settings[model]['sub_zip_neighborhood']
            # Replace the cached model in its slot, or append when running
            # fresh (indexed assignment on an empty list raises IndexError).
            if index < len(segment_models):
                segment_models[index] = new_model
            else:
                segment_models.append(new_model)
            log.info('Model %s intialized at index %i' % (model, index))
        index += 1

    #Cross validate all segment models (optional)
    if settings['export_cv_predictions_all_models'] == 'y' or settings['export_cv_predictions_new_models'] == 'y':
        log.info('============CROSS VALIDATION================')
        # NOTE(review): this block uses dfTrnCV/dfTestCV, which only exist
        # when settings['generate_cv_score'] == 'y' -- confirm the settings
        # combinations used in practice.
        for model in segment_models[:]:
            #If model has cached CV predictions then skip predicting and just export them (if selected in settings)
            if hasattr(model, 'dfCVPredictions'):
                log.info('Cached CV predictions found. Using cached CV predictions.')
                if settings['export_cv_predictions_all_models'] == 'y':
                    data_io.save_predictions(model.dfCVPredictions, model.target, model_name=model.model_name,
                                             directory=settings['dir_submissions'],
                                             estimator_class=model.estimator_class, note='CV_list')
            else:
                print_model_header(model)
                #Prepare segment model: segment and create feature vectors for the CV data set
                dfTrn_Segment, dfTest_Segment = prepare_segment_model(dfTrnCV, dfTestCV, model)
                #Generate CV predictions
                train.cross_validate(model, settings, dfTrn_Segment, dfTest_Segment)
                #Cache the CV predictions as a dataframe stored in each segment model
                # (.loc replaces the removed DataFrame.ix indexer)
                model.dfCVPredictions = dfTest_Segment.loc[:, ['id', model.target]]
                if settings['export_cv_predictions_new_models'] == 'y':
                    data_io.save_predictions(model.dfCVPredictions, model.target, model_name=model.model_name,
                                             directory=settings['dir_submissions'],
                                             estimator_class=model.estimator_class, note='CV_list')

    #Generate predictions on test set for all segment models (optional)
    if settings['export_predictions_all_models'] == 'y' or settings['export_predictions_new_models'] == 'y'\
            or settings['export_predictions_total'] == 'y':
        log.info('=======GENERATING TEST PREDICTIONS==========')
        for model in segment_models[:]:
            #If model has cached test predictions then skip predicting and just export them (if selected in settings)
            if hasattr(model, 'dfPredictions'):
                log.info('Cached test predictions found for model %s. Using cached predictions.' % model.model_name)
                if settings['export_predictions_all_models'] == 'y':
                    data_io.save_predictions(model.dfPredictions, model.target, model_name=model.model_name,
                                             directory=settings['dir_submissions'],
                                             estimator_class=model.estimator_class, note='TESTset')
            else:
                print_model_header(model)
                #Prepare segment model: segment and create feature vectors for the full TEST data set
                dfTrn_Segment, dfTest_Segment = prepare_segment_model(dfTrn, dfTest, model)
                #Generate TEST set predictions
                model.predict(dfTrn_Segment, dfTest_Segment)
                if settings['export_predictions_all_models'] == 'y' or settings['export_predictions_new_models'] == 'y':
                    data_io.save_predictions(model.dfPredictions, model.target, model_name=model.model_name,
                                             directory=settings['dir_submissions'],
                                             estimator_class=model.estimator_class, note='TESTset')
            log.info(utils.line_break())

    #Cache the trained models and predictions to file (optional)
    if settings['export_cached_models'] == 'y':
        log.info('==========EXPORTING CACHED MODELS===========')
        data_io.save_cached_object(segment_models, 'segment_models')

    #Merge each segment model's CV predictions into a master dataframe and export it (optional)----#
    if settings['export_cv_predictions_total'] == 'y':
        log.info('====MERGING CV PREDICTIONS FROM SEGMENTS====')
        dfTestPredictionsTotal = merge_segment_predictions(segment_models, dfTestCV, cv=True)
        #---Apply post process rules to master dataframe---#
        #Set all votes and comments for remote_api segment to 1 and 0
        dfTestPredictionsTotal = dfTestPredictionsTotal.merge(dfTest[['source', 'id']], on='id', how='left')
        # NOTE(review): chained indexed assignment below can trigger pandas'
        # SettingWithCopy warning; preserved as-is to keep behavior unchanged.
        for x in dfTestPredictionsTotal.index:
            if dfTestPredictionsTotal.source[x] == 'remote_api_created':
                dfTestPredictionsTotal.num_votes[x] = 1
                dfTestPredictionsTotal.num_comments[x] = 0
        #Export
        timestamp = datetime.now().strftime('%m-%d-%y_%H%M')
        filename = 'Submits/' + timestamp + '--bryan_CV_predictions.csv'
        dfTestPredictionsTotal.to_csv(filename)

    #Merge each segment model's TEST predictions into a master dataframe and export it (optional)----#
    if settings['export_predictions_total'] == 'y':
        log.info('===MERGING TEST PREDICTIONS FROM SEGMENTS===')
        dfTestPredictionsTotal = merge_segment_predictions(segment_models, dfTest)
        #---Apply post process rules to master dataframe---#
        #Set all votes and comments for remote_api segment to 1 and 0
        dfTestPredictionsTotal = dfTestPredictionsTotal.merge(dfTest[['source', 'id']], on='id', how='left')
        for x in dfTestPredictionsTotal.index:
            if dfTestPredictionsTotal.source[x] == 'remote_api_created':
                dfTestPredictionsTotal.num_votes[x] = 1
                dfTestPredictionsTotal.num_comments[x] = 0
        del dfTestPredictionsTotal['source']
        #Export
        filename = 'bryan_test_predictions.csv'
        data_io.save_combined_predictions(dfTestPredictionsTotal, settings['dir_submissions'], filename)

    #End main
    log.info('********Program ran successfully. Exiting********')
##########################################################################################################
def prepare_segment_model(dfTrn, dfTest, model):
    """Slice the train/test frames down to *model*'s segment and build the
    neighborhood features it needs.

    Returns the (train, test) segment dataframes ready for fitting.
    """
    # Carve out just the rows belonging to this model's data segment.
    dfTrn_Segment, dfTest_Segment = munge.segment_data(dfTrn, dfTest, model.segment)

    knn_threshold = int(model.KNN_neighborhood_threshold)
    if knn_threshold > 0:
        if model.sub_zip_neighborhood == 'y':
            #Substitute zipcodes for overly common neighborhoods to provide more geographic detail
            log.info('==USING ZIP FOR PLACEHOLDER NEIGHBORHOODS===')
            features.sub_feature(dfTrn_Segment, 'zipcode', 'neighborhood',
                                 ['Richmond', 'Oakland', 'Manchester', 'Chicago', 'New Haven'])
            features.sub_feature(dfTest_Segment, 'zipcode', 'neighborhood',
                                 ['Richmond', 'Oakland', 'Manchester', 'Chicago', 'New Haven'])
        # Collapse rare neighborhoods (count below threshold) via KNN.
        log.info('==KNN ON RARE NEIGHBORHOODS WITH COUNT < %i==' % knn_threshold)
        dfTrn_Segment = features.knn_thresholding(dfTrn_Segment, 'neighborhood',
                                                  knn_threshold)
        dfTest_Segment = features.knn_thresholding(dfTest_Segment, 'neighborhood',
                                                   knn_threshold)
    return dfTrn_Segment, dfTest_Segment
##########################################################################################################
def print_model_header(model):
    """Log a header describing *model*: name, segment, target, features,
    estimator and post-process scalar.

    Fix: the original called ``.sort()`` on the result of ``map``, which is
    only a list on Python 2 (``map`` returns an iterator on Python 3);
    ``sorted(...)`` works identically on both.
    """
    features_list = sorted(map(str, model.features.keys()))
    log.info(utils.line_break())
    log.info('MODEL: %s SEGMENT: %s TARGET: %s ' % (model.model_name, model.segment, model.target))
    log.info('FEATURES: %s' % features_list)
    # NOTE(review): models elsewhere are constructed with an
    # ``estimator_class`` argument; confirm ``estimator`` is the attribute
    # name exposed by the Model class.
    log.info('ESTIMATOR CLASS: %s ' % model.estimator)
    log.info('POST-PROCESS SCALAR: %s ' % model.postprocess_scalar)
##########################################################################################################
def merge_segment_predictions(segment_models, dfTest, cv=False):
    """Combine the predictions of all segment models into a master frame.

    Fix: the removed-in-pandas-1.0 ``.ix`` indexer is replaced — ``df.ix[:]``
    (full row slice) becomes ``.copy()`` and ``dfTest.ix[:][cols]`` becomes
    plain column selection.  ``.keys()`` membership test simplified to plain
    ``in`` on the dict.

    :param segment_models: models exposing ``target``, ``model_name`` and
        ``dfPredictions`` (or ``dfCVPredictions`` when ``cv`` is True)
    :param dfTest: test frame supplying the ``source`` column for the
        Oakland remote_api overlap hack
    :param cv: when True, merge each model's CV predictions instead of its
        test-set predictions
    :return: one DataFrame with columns id, num_views, num_votes, num_comments
    """
    all_predictions = {}
    for model in segment_models[:]:
        if model.target not in all_predictions:
            # First frame seen for this target starts the accumulator.
            if cv == False:
                all_predictions[model.target] = model.dfPredictions.copy()
            else:
                all_predictions[model.target] = model.dfCVPredictions.copy()
        else:
            #---Hack to fix Oakland remote_api overlap on views----#
            if model.model_name == 'oakland_other_views':
                model.dfPredictions = model.dfPredictions.merge(
                    dfTest[['source', 'id']], on='id', how='left')
                model.dfPredictions = model.dfPredictions[
                    model.dfPredictions.source != 'remote_api_created']
                del model.dfPredictions['source']
            #---End Hack-----#
            if cv == False:
                all_predictions[model.target] = pd.concat(
                    [all_predictions[model.target], model.dfPredictions])
            else:
                all_predictions[model.target] = pd.concat(
                    [all_predictions[model.target], model.dfCVPredictions])
    # Stitch the three targets together on the issue id.
    dfTestPredictionsTotal = all_predictions['num_views'].merge(all_predictions['num_votes'], on='id', how='left')\
        .merge(all_predictions['num_comments'], on='id', how='left')
    return dfTestPredictionsTotal
if __name__ == '__main__':
    # Run the full pipeline; propagate main()'s return as the exit status.
    sys.exit(main())
|
{
"content_hash": "3c0aa7bd3a63ac8ffa18afdbc238358c",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 120,
"avg_line_length": 57.88211382113821,
"alnum_prop": 0.5920359575812908,
"repo_name": "rolando/theusual-kaggle-seeclickfix-ensemble",
"id": "bb0aa4b44412e8e8a28526ce6a60166628b7f39a",
"size": "14240",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Bryan/main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "154402"
}
],
"symlink_target": ""
}
|
import copy
import eventlet
import mock
from nova import exception
from nova import objects
from nova import test
from pypowervm.tests import test_fixtures as pvm_fx
from pypowervm.wrappers import iocard as pvm_card
from pypowervm.wrappers import network as pvm_net
from nova_powervm.tests.virt import powervm
from nova_powervm.virt.powervm.tasks import network as tf_net
def cna(mac):
    """Builds a mock Client Network Adapter (or VNIC) for unit tests."""
    # Keyword arguments to MagicMock become attributes on the mock.
    return mock.MagicMock(mac=mac, vswitch_uri='fake_href')
class TestNetwork(test.NoDBTestCase):
    def setUp(self):
        """Create the shared adapter fixture and a permissive LPAR wrapper."""
        super(TestNetwork, self).setUp()
        self.flags(host='host1')
        # pypowervm adapter mock shared by all tasks under test.
        self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt

        # LPAR wrapper whose state permits I/O changes by default; individual
        # tests flip can_modify_io to exercise the error paths.
        self.mock_lpar_wrap = mock.MagicMock()
        self.mock_lpar_wrap.can_modify_io.return_value = True, None
    @mock.patch('nova_powervm.virt.powervm.vif.unplug', autospec=True)
    @mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True)
    def test_unplug_vifs(self, mock_vm_get, mock_unplug):
        """Tests that a delete of the vif can be done."""
        inst = objects.Instance(**powervm.TEST_INSTANCE)

        # Mock up the CNA responses.
        cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11'), cna('AABBCCDDEE22')]
        mock_vm_get.return_value = cnas

        # Mock up the network info.  This also validates that they will be
        # sanitized to upper case.
        net_info = [
            {'address': 'aa:bb:cc:dd:ee:ff'}, {'address': 'aa:bb:cc:dd:ee:22'},
            {'address': 'aa:bb:cc:dd:ee:33'}
        ]

        # Mock out the vif driver
        def validate_unplug(adapter, host_uuid, instance, vif,
                            slot_mgr, cna_w_list=None):
            # Assertions run inside the mocked unplug call itself, so each
            # invocation's arguments are checked.
            self.assertEqual(adapter, self.apt)
            self.assertEqual('host_uuid', host_uuid)
            self.assertEqual(instance, inst)
            self.assertIn(vif, net_info)
            self.assertEqual('slot_mgr', slot_mgr)
            self.assertEqual(cna_w_list, cnas)

        mock_unplug.side_effect = validate_unplug

        # Run method
        p_vifs = tf_net.UnplugVifs(self.apt, inst, net_info, 'host_uuid',
                                   'slot_mgr')
        p_vifs.execute(self.mock_lpar_wrap)

        # Make sure the unplug was invoked, so that we know that the validation
        # code was called
        self.assertEqual(3, mock_unplug.call_count)

        # Validate args on taskflow.task.Task instantiation
        with mock.patch('taskflow.task.Task.__init__') as tf:
            tf_net.UnplugVifs(self.apt, inst, net_info, 'host_uuid',
                              'slot_mgr')
            tf.assert_called_once_with(name='unplug_vifs', requires=['lpar_wrap'])
def test_unplug_vifs_invalid_state(self):
"""Tests that the delete raises an exception if bad VM state."""
inst = objects.Instance(**powervm.TEST_INSTANCE)
# Mock that the state is incorrect
self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad'
# Run method
p_vifs = tf_net.UnplugVifs(self.apt, inst, mock.Mock(), 'host_uuid',
'slot_mgr')
self.assertRaises(exception.VirtualInterfaceUnplugException,
p_vifs.execute, self.mock_lpar_wrap)
    @mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True)
    @mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True)
    @mock.patch('nova_powervm.virt.powervm.vm.get_vnics', autospec=True)
    def test_plug_vifs_rmc(self, mock_vnic_get, mock_cna_get, mock_plug):
        """Tests that a crt vif can be done with secure RMC."""
        inst = objects.Instance(**powervm.TEST_INSTANCE)

        # Mock up the CNA response.  One should already exist, the other
        # should not.
        pre_cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]
        mock_cna_get.return_value = copy.deepcopy(pre_cnas)
        # Ditto VNIC response.
        mock_vnic_get.return_value = [cna('AABBCCDDEE33'), cna('AABBCCDDEE44')]

        # Mock up the network info.  This also validates that they will be
        # sanitized to upper case.  Entries 0 and 2 match existing adapters
        # (update path); entries 1 and 3 do not (create path).
        net_info = [
            {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
            {'address': 'aa:bb:cc:dd:ee:22', 'vnic_type': 'normal'},
            {'address': 'aa:bb:cc:dd:ee:33', 'vnic_type': 'direct'},
            {'address': 'aa:bb:cc:dd:ee:55', 'vnic_type': 'direct'}
        ]

        # Both updates run first (one CNA, one VNIC); then the CNA create, then
        # the VNIC create.
        mock_new_cna = mock.Mock(spec=pvm_net.CNA)
        mock_new_vnic = mock.Mock(spec=pvm_card.VNIC)
        mock_plug.side_effect = ['upd_cna', 'upd_vnic',
                                 mock_new_cna, mock_new_vnic]

        # Run method
        p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info,
                                 'host_uuid', 'slot_mgr')
        all_cnas = p_vifs.execute(self.mock_lpar_wrap)

        # new vif should be created twice.
        mock_plug.assert_any_call(self.apt, 'host_uuid', inst, net_info[0],
                                  'slot_mgr', new_vif=False)
        mock_plug.assert_any_call(self.apt, 'host_uuid', inst, net_info[1],
                                  'slot_mgr', new_vif=True)
        mock_plug.assert_any_call(self.apt, 'host_uuid', inst, net_info[2],
                                  'slot_mgr', new_vif=False)
        mock_plug.assert_any_call(self.apt, 'host_uuid', inst, net_info[3],
                                  'slot_mgr', new_vif=True)

        # The Task provides the list of original CNAs plus only CNAs that were
        # created.
        self.assertEqual(pre_cnas + [mock_new_cna], all_cnas)

        # Validate args on taskflow.task.Task instantiation
        with mock.patch('taskflow.task.Task.__init__') as tf:
            tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info,
                            'host_uuid', 'slot_mgr')
            tf.assert_called_once_with(name='plug_vifs', provides='vm_cnas',
                                       requires=['lpar_wrap'])
@mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True)
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True)
def test_plug_vifs_rmc_no_create(self, mock_vm_get, mock_plug):
    """Verifies if no creates are needed, none are done."""
    instance = objects.Instance(**powervm.TEST_INSTANCE)
    # Every MAC in the network info already has a backing CNA, so no new
    # VIF should be created.  The lower-case MACs also confirm they get
    # sanitized to upper case before comparison; and since no net has
    # vnic_type 'direct', get_vnics must never be invoked (it is not
    # mocked here, so a call would blow up).
    mock_vm_get.return_value = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]
    net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
                {'address': 'aa:bb:cc:dd:ee:11', 'vnic_type': 'normal'}]
    # Build and run the task.
    task = tf_net.PlugVifs(mock.MagicMock(), self.apt, instance, net_info,
                           'host_uuid', 'slot_mgr')
    task.execute(self.mock_lpar_wrap)
    # The final plug must have been for an existing VIF (new_vif=False).
    mock_plug.assert_called_with(
        self.apt, 'host_uuid', instance, net_info[1],
        'slot_mgr', new_vif=False)
@mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True)
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True)
def test_plug_vifs_invalid_state(self, mock_vm_get, mock_plug):
    """Tests that a crt_vif fails when the LPAR state is bad."""
    instance = objects.Instance(**powervm.TEST_INSTANCE)
    # No pre-existing CNAs; a single net keeps the scenario minimal.
    mock_vm_get.return_value = []
    net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}]
    # Report the LPAR as not modifiable for I/O.
    self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad'
    # Build the task and verify execute raises on the bad state.
    task = tf_net.PlugVifs(mock.MagicMock(), self.apt, instance, net_info,
                           'host_uuid', 'slot_mgr')
    self.assertRaises(exception.VirtualInterfaceCreateException,
                      task.execute, self.mock_lpar_wrap)
    # No plug attempt should have been made.
    self.assertEqual(0, mock_plug.call_count)
@mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True)
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True)
def test_plug_vifs_timeout(self, mock_vm_get, mock_plug):
    """Tests that crt vif failure via loss of neutron callback."""
    instance = objects.Instance(**powervm.TEST_INSTANCE)
    # One existing CNA whose MAC does not match the net, so a create is
    # attempted.
    mock_vm_get.return_value = [cna('AABBCCDDEE11')]
    net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}]
    # Simulate the neutron event never arriving: plug times out.
    mock_plug.side_effect = eventlet.timeout.Timeout()
    # Build the task and verify the timeout surfaces as a create failure.
    task = tf_net.PlugVifs(mock.MagicMock(), self.apt, instance, net_info,
                           'host_uuid', 'slot_mgr')
    self.assertRaises(exception.VirtualInterfaceCreateException,
                      task.execute, self.mock_lpar_wrap)
    # Exactly one plug attempt should have been made.
    self.assertEqual(1, mock_plug.call_count)
@mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True)
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True)
def test_plug_vifs_diff_host(self, mock_vm_get, mock_plug):
    """Tests that crt vif handles bad inst.host value."""
    instance = powervm.TEST_INST1
    # Make this compute host differ from instance.host.
    self.flags(host='host2')
    # One existing, non-matching CNA; one net needing a create.
    mock_vm_get.return_value = [cna('AABBCCDDEE11')]
    net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}]
    # Build and run the task while watching instance.save().
    task = tf_net.PlugVifs(mock.MagicMock(), self.apt, instance, net_info,
                           'host_uuid', 'slot_mgr')
    with mock.patch.object(instance, 'save') as mock_inst_save:
        task.execute(self.mock_lpar_wrap)
    # A single plug for the single missing VIF.
    self.assertEqual(1, mock_plug.call_count)
    # save() fires twice: once recording the new host, once restoring it.
    self.assertEqual(2, mock_inst_save.call_count)
    self.assertEqual('host1', instance.host)
@mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True)
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True)
def test_plug_vifs_diff_host_except(self, mock_vm_get, mock_plug):
    """Tests that crt vif handles bad inst.host value.

    This test ensures that if we get a timeout exception we still reset
    the inst.host value back to the original value
    """
    instance = powervm.TEST_INST1
    # Make this compute host differ from instance.host.
    self.flags(host='host2')
    # One existing, non-matching CNA; one net needing a create.
    mock_vm_get.return_value = [cna('AABBCCDDEE11')]
    net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}]
    # Force the plug to time out.
    mock_plug.side_effect = eventlet.timeout.Timeout()
    # Build and run the task while watching instance.save().
    task = tf_net.PlugVifs(mock.MagicMock(), self.apt, instance, net_info,
                           'host_uuid', 'slot_mgr')
    with mock.patch.object(instance, 'save') as mock_inst_save:
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          task.execute, self.mock_lpar_wrap)
    # Exactly one plug attempt before the timeout.
    self.assertEqual(1, mock_plug.call_count)
    # Even on failure, save() fires twice: host flipped then restored.
    self.assertEqual(2, mock_inst_save.call_count)
    self.assertEqual('host1', instance.host)
@mock.patch('nova_powervm.virt.powervm.vif.unplug', autospec=True)
@mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True)
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True)
def test_plug_vifs_revert(self, mock_vm_get, mock_plug, mock_unplug):
    """Tests that the revert flow works properly.

    Only VIFs that the execute phase created are unplugged on revert;
    a failure unplugging one must not stop the rest.
    """
    inst = objects.Instance(**powervm.TEST_INSTANCE)
    # Fake CNA list. The one pre-existing VIF should *not* get reverted.
    cna_list = [cna('AABBCCDDEEFF'), cna('FFEEDDCCBBAA')]
    mock_vm_get.return_value = cna_list
    # Mock up the network info. Three roll backs.
    net_info = [
        {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
        {'address': 'aa:bb:cc:dd:ee:22', 'vnic_type': 'normal'},
        {'address': 'aa:bb:cc:dd:ee:33', 'vnic_type': 'normal'}
    ]
    # Make sure we test raising an exception: the first unplug fails, the
    # second succeeds (side_effect is consumed in call order).
    mock_unplug.side_effect = [exception.NovaException(), None]
    # Run method
    p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info,
                             'host_uuid', 'slot_mgr')
    p_vifs.execute(self.mock_lpar_wrap)
    p_vifs.revert(self.mock_lpar_wrap, mock.Mock(), mock.Mock())
    # The unplug should be called twice. The exception shouldn't stop the
    # second call.
    self.assertEqual(2, mock_unplug.call_count)
    # Make sure each call is invoked correctly. The first plug was not a
    # new vif, so it should not be reverted.
    c2 = mock.call(self.apt, 'host_uuid', inst, net_info[1],
                   'slot_mgr', cna_w_list=cna_list)
    c3 = mock.call(self.apt, 'host_uuid', inst, net_info[2],
                   'slot_mgr', cna_w_list=cna_list)
    mock_unplug.assert_has_calls([c2, c3])
@mock.patch('nova_powervm.virt.powervm.vif.plug_secure_rmc_vif',
            autospec=True)
@mock.patch('nova_powervm.virt.powervm.vif.get_secure_rmc_vswitch',
            autospec=True)
@mock.patch('nova_powervm.virt.powervm.vif.plug', autospec=True)
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas', autospec=True)
def test_plug_mgmt_vif(self, mock_vm_get, mock_plug,
                       mock_get_rmc_vswitch, mock_plug_rmc_vif):
    """Tests that a mgmt vif can be created.

    Exercises four scenarios in sequence: an existing mgmt CNA found via
    lookup, no mgmt CNA found via lookup, CNAs handed in without the mgmt
    vif, and CNAs handed in including the mgmt vif.
    """
    inst = objects.Instance(**powervm.TEST_INSTANCE)
    # Mock up the rmc vswitch
    vswitch_w = mock.MagicMock()
    vswitch_w.href = 'fake_mgmt_uri'
    mock_get_rmc_vswitch.return_value = vswitch_w
    # Run method such that it triggers a fresh CNA search
    p_vifs = tf_net.PlugMgmtVif(self.apt, inst, 'host_uuid', 'slot_mgr')
    p_vifs.execute(None)
    # With the default get_cnas mock (which returns a Mock()), we think we
    # found an existing management CNA.
    self.assertEqual(0, mock_plug_rmc_vif.call_count)
    mock_vm_get.assert_called_once_with(
        self.apt, inst, vswitch_uri='fake_mgmt_uri')
    # Now mock get_cnas to return no hits
    mock_vm_get.reset_mock()
    mock_vm_get.return_value = []
    p_vifs.execute(None)
    # Get was called; and since it didn't have the mgmt CNA, so was plug.
    self.assertEqual(1, mock_plug_rmc_vif.call_count)
    mock_vm_get.assert_called_once_with(
        self.apt, inst, vswitch_uri='fake_mgmt_uri')
    # Now pass CNAs, but not the mgmt vif, "from PlugVifs"
    cnas = [mock.Mock(vswitch_uri='uri1'), mock.Mock(vswitch_uri='uri2')]
    mock_plug_rmc_vif.reset_mock()
    mock_vm_get.reset_mock()
    p_vifs.execute(cnas)
    # Get wasn't called, since the CNAs were passed "from PlugVifs"; but
    # since the mgmt vif wasn't included, plug was called.
    self.assertEqual(0, mock_vm_get.call_count)
    self.assertEqual(1, mock_plug_rmc_vif.call_count)
    # Finally, pass CNAs including the mgmt.
    cnas.append(mock.Mock(vswitch_uri='fake_mgmt_uri'))
    mock_plug_rmc_vif.reset_mock()
    p_vifs.execute(cnas)
    # Neither get nor plug was called.
    self.assertEqual(0, mock_vm_get.call_count)
    self.assertEqual(0, mock_plug_rmc_vif.call_count)
    # Validate args on taskflow.task.Task instantiation
    with mock.patch('taskflow.task.Task.__init__') as tf:
        tf_net.PlugMgmtVif(self.apt, inst, 'host_uuid', 'slot_mgr')
    tf.assert_called_once_with(name='plug_mgmt_vif', provides='mgmt_cna',
                               requires=['vm_cnas'])
def test_get_vif_events(self):
    """An event is produced only for nets reporting as active."""
    instance = objects.Instance(**powervm.TEST_INSTANCE)
    # Two nets: 'a' reports inactive (.get -> False), 'b' active.
    net_infos = [mock.MagicMock(), mock.MagicMock()]
    net_infos[0]['id'] = 'a'
    net_infos[0].get.return_value = False
    net_infos[1]['id'] = 'b'
    net_infos[1].get.return_value = True
    # Build the task and point its create-list at both nets.
    task = tf_net.PlugVifs(mock.MagicMock(), self.apt, instance, net_infos,
                           'host_uuid', 'slot_mgr')
    task.crt_network_infos = net_infos
    events = task._get_vif_events()
    # Only one should be returned since only one was active.
    self.assertEqual(1, len(events))
|
{
"content_hash": "e5bc1fddb318a29044a6cfa00aaff536",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 79,
"avg_line_length": 43.8225,
"alnum_prop": 0.6013463403502767,
"repo_name": "stackforge/nova-powervm",
"id": "c04e8aa221456c287f5d42ec2b421df322445b3a",
"size": "18162",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova_powervm/tests/virt/powervm/tasks/test_network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "716308"
},
{
"name": "Shell",
"bytes": "6293"
}
],
"symlink_target": ""
}
|
from __future__ import division
import numpy as np
from library import match_args_return
from constants import cp0
from conversions import t_from_CT, pt_from_CT
from absolute_salinity_sstar_ct import CT_from_t
from basic_thermodynamic_t import rho_t_exact, alpha_wrt_CT_t_exact
from basic_thermodynamic_t import beta_const_CT_t_exact, specvol_t_exact
from basic_thermodynamic_t import specvol_anom_t_exact, sound_speed_t_exact
from basic_thermodynamic_t import t_maxdensity_exact, enthalpy_t_exact
from basic_thermodynamic_t import internal_energy_t_exact, sigma0_pt0_exact
from basic_thermodynamic_t import t_from_rho_exact
# Public API: Conservative-Temperature-based "exact" quantities, each
# evaluated via the full Gibbs function rather than the 48-term fit.
__all__ = [
    'rho_CT_exact',
    'alpha_CT_exact',
    'beta_CT_exact',
    'rho_alpha_beta_CT_exact',
    'specvol_CT_exact',
    'specvol_anom_CT_exact',
    'sigma0_CT_exact',
    'sigma1_CT_exact',
    'sigma2_CT_exact',
    'sigma3_CT_exact',
    'sigma4_CT_exact',
    'sound_speed_CT_exact',
    'internal_energy_CT_exact',
    'enthalpy_CT_exact',
    'enthalpy_diff_CT_exact',
    'dynamic_enthalpy_CT_exact',
    'SA_from_rho_CT_exact',
    'CT_from_rho_exact',
    'CT_maxdensity_exact'
]
def rho_CT_exact(SA, CT, p):
    r"""Calculate in-situ density from Absolute Salinity and Conservative
    Temperature, using the full Gibbs function.

    Parameters
    ----------
    SA : array_like
        Absolute Salinity [g/kg]
    CT : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        sea pressure [dbar]

    Returns
    -------
    rho_CT_exact : array_like
        in-situ density [kg/m**3]

    Notes
    -----
    Passing a reference pressure p_ref as *p* yields the potential density
    referenced to p_ref.  rho_CT(SA, CT, p) is the computationally
    efficient 48-term alternative (McDougall et al., 2011).

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic
       equation of seawater - 2010 (TEOS-10), Eqn. (2.8.2).
    .. [2] McDougall T.J., P.M. Barker, R. Feistel and D.R. Jackett, 2011.
    """
    # Convert CT to in-situ temperature, then evaluate the exact density.
    return rho_t_exact(SA, t_from_CT(SA, CT, p), p)
def alpha_CT_exact(SA, CT, p):
    r"""Calculate the thermal expansion coefficient of seawater with
    respect to Conservative Temperature, using the full Gibbs function.

    Parameters
    ----------
    SA : array_like
        Absolute Salinity [g kg :sup:`-1`]
    CT : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        pressure [dbar]

    Returns
    -------
    alpha_CT_exact : array_like
        thermal expansion coefficient with respect to
        Conservative Temperature [K :sup:`-1`]

    Notes
    -----
    alpha_wrt_CT(SA, CT, p) is the computationally efficient 48-term
    alternative (McDougall et al., 2011).

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: TEOS-10 manual, Eqn. (2.18.3).
    .. [2] McDougall T.J., P.M. Barker, R. Feistel and D.R. Jackett, 2011.
    """
    # Convert CT to in-situ temperature, then evaluate the exact alpha.
    return alpha_wrt_CT_t_exact(SA, t_from_CT(SA, CT, p), p)
def beta_CT_exact(SA, CT, p):
    r"""Calculate the saline (i.e. haline) contraction coefficient of
    seawater at constant Conservative Temperature, using the full Gibbs
    function.

    Parameters
    ----------
    SA : array_like
        Absolute Salinity [g kg :sup:`-1`]
    CT : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        pressure [dbar]

    Returns
    -------
    beta_CT_exact : array_like
        saline contraction coefficient at constant
        Conservative Temperature [kg g :sup:`-1`]

    Notes
    -----
    beta_const_CT(SA, CT, p) is the computationally efficient 48-term
    alternative (McDougall et al., 2011).

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: TEOS-10 manual, Eqn. (2.19.3).
    .. [2] McDougall T.J., P.M. Barker, R. Feistel and D.R. Jackett, 2011.
    """
    # Convert CT to in-situ temperature, then evaluate the exact beta.
    return beta_const_CT_t_exact(SA, t_from_CT(SA, CT, p), p)
def rho_alpha_beta_CT_exact(SA, CT, p):
    r"""Calculate in-situ density, the thermal expansion coefficient with
    respect to Conservative Temperature, and the saline contraction
    coefficient at constant Conservative Temperature, in one call.

    See the individual functions rho_CT_exact, alpha_CT_exact, and
    beta_CT_exact.  Retained for compatibility with the Matlab GSW
    toolbox.

    Returns
    -------
    (rho, alpha, beta) : tuple of array_like
    """
    # Convert CT once, then evaluate all three exact quantities.
    t = t_from_CT(SA, CT, p)
    return (rho_t_exact(SA, t, p),
            alpha_wrt_CT_t_exact(SA, t, p),
            beta_const_CT_t_exact(SA, t, p))
def specvol_CT_exact(SA, CT, p):
    r"""Calculate specific volume from Absolute Salinity, Conservative
    Temperature and pressure, using the full Gibbs function.

    Parameters
    ----------
    SA : array_like
        Absolute Salinity [g/kg]
    CT : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        sea pressure [dbar]

    Returns
    -------
    specvol_CT_exact : array_like
        specific volume [m**3/kg]

    Notes
    -----
    specvol_CT(SA, CT, p) is the computationally efficient 48-term
    alternative (McDougall et al., 2011).

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: TEOS-10 manual, Eqn. (2.7.2).
    .. [2] McDougall T.J., P.M. Barker, R. Feistel and D.R. Jackett, 2011.
    """
    # Convert CT to in-situ temperature, then evaluate exactly.
    return specvol_t_exact(SA, t_from_CT(SA, CT, p), p)
def specvol_anom_CT_exact(SA, CT, p):
    r"""Calculate specific volume anomaly from Absolute Salinity,
    Conservative Temperature and pressure, using the full Gibbs function.
    The reference value of Absolute Salinity is SSO and the reference
    value of Conservative Temperature is 0 deg C.

    Parameters
    ----------
    SA : array_like
        Absolute Salinity [g/kg]
    CT : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        sea pressure [dbar]

    Returns
    -------
    specvol_anom_CT_exact : array_like
        specific volume anomaly [m**3/kg]

    Notes
    -----
    specvol_anom_CT(SA, CT, p) is the computationally efficient 48-term
    alternative (McDougall et al., 2011).

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: TEOS-10 manual, Eqn. (3.7.3).
    .. [2] McDougall T.J., P.M. Barker, R. Feistel and D.R. Jackett, 2011.
    """
    # Convert CT to in-situ temperature, then evaluate exactly.
    return specvol_anom_t_exact(SA, t_from_CT(SA, CT, p), p)
def sigma0_CT_exact(SA, CT):
    r"""Calculate potential density anomaly with reference pressure of
    0 dbar (potential density minus 1000 kg/m^3), from Absolute Salinity
    and Conservative Temperature, using the full Gibbs function.

    Parameters
    ----------
    SA : array_like
        Absolute Salinity [g/kg]
    CT : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]

    Returns
    -------
    sigma0_CT_exact : array_like
        potential density anomaly referenced to 0 dbar [kg/m**3]

    Notes
    -----
    gsw_sigma0_CT(SA, CT, p) is the computationally efficient 48-term
    alternative (McDougall et al., 2011).

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: TEOS-10 manual, Eqn. (A.30.1).
    .. [2] McDougall T.J., P.M. Barker, R. Feistel and D.R. Jackett, 2011.
    """
    # Only the potential temperature referenced to 0 dbar is needed here.
    return sigma0_pt0_exact(SA, pt_from_CT(SA, CT))
def _sigma_CT_exact(SA, CT, p_ref):
    """Potential density anomaly [kg/m**3] at reference pressure p_ref
    [dbar]: full-Gibbs-function density at (SA, CT, p_ref) minus 1000."""
    t = t_from_CT(SA, CT, p_ref)
    return rho_t_exact(SA, t, p_ref) - 1000


def sigma1_CT_exact(SA, CT):
    r"""Calculates potential density anomaly with reference pressure of
    1000 dbar."""
    return _sigma_CT_exact(SA, CT, 1000.)


def sigma2_CT_exact(SA, CT):
    r"""Calculates potential density anomaly with reference pressure of
    2000 dbar."""
    return _sigma_CT_exact(SA, CT, 2000.)


def sigma3_CT_exact(SA, CT):
    r"""Calculates potential density anomaly with reference pressure of
    3000 dbar."""
    return _sigma_CT_exact(SA, CT, 3000.)


def sigma4_CT_exact(SA, CT):
    r"""Calculates potential density anomaly with reference pressure of
    4000 dbar."""
    return _sigma_CT_exact(SA, CT, 4000.)
def sound_speed_CT_exact(SA, CT, p):
    r"""Calculate the speed of sound in seawater from Absolute Salinity,
    Conservative Temperature and pressure, using the full Gibbs function.

    Parameters
    ----------
    SA : array_like
        Absolute Salinity [g kg :sup:`-1`]
    CT : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        pressure [dbar]

    Returns
    -------
    sound_speed_CT_exact : array_like
        speed of sound in seawater [m s :sup:`-1`]

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: TEOS-10 manual, Eqn. (2.17.1).
    """
    # Convert CT to in-situ temperature, then evaluate exactly.
    return sound_speed_t_exact(SA, t_from_CT(SA, CT, p), p)
def internal_energy_CT_exact(SA, CT, p):
    r"""Calculate the specific internal energy of seawater from Absolute
    Salinity, Conservative Temperature and pressure, using the full Gibbs
    function.

    Parameters
    ----------
    SA : array_like
        Absolute Salinity [g/kg]
    CT : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        sea pressure [dbar]

    Returns
    -------
    internal_energy_CT_exact : array_like
        specific internal energy (u) [J/kg]

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: TEOS-10 manual, Eqn. (2.11.1).
    """
    # Convert CT to in-situ temperature, then evaluate exactly.
    return internal_energy_t_exact(SA, t_from_CT(SA, CT, p), p)
def enthalpy_CT_exact(SA, CT, p):
    r"""Calculate specific enthalpy of seawater from Absolute Salinity,
    Conservative Temperature and pressure, using the full Gibbs function.

    Parameters
    ----------
    SA : array_like
        Absolute Salinity [g/kg]
    CT : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        sea pressure [dbar]

    Returns
    -------
    enthalpy_CT_exact : array_like
        specific enthalpy [J/kg]

    Notes
    -----
    enthalpy_CT(SA, CT, p) is the computationally efficient 48-term
    alternative (McDougall et al., 2011).

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: TEOS-10 manual, appendix A.11.
    .. [2] McDougall T.J., P.M. Barker, R. Feistel and D.R. Jackett, 2011.
    """
    # Convert CT to in-situ temperature, then evaluate exactly.
    return enthalpy_t_exact(SA, t_from_CT(SA, CT, p), p)
def enthalpy_diff_CT_exact(SA, CT, p_shallow, p_deep):
    r"""Calculate the difference of specific enthalpy of seawater between
    two pressures, p_deep (the deeper pressure) and p_shallow (the
    shallower pressure), at the same SA and CT: the specific enthalpy at
    (SA, CT, p_deep) minus that at (SA, CT, p_shallow).

    Parameters
    ----------
    SA : array_like
        Absolute Salinity [g/kg]
    CT : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p_shallow : array_like
        shallower sea pressure [dbar]
    p_deep : array_like
        deeper sea pressure [dbar]

    Returns
    -------
    enthalpy_diff_CT_exact : array_like
        difference of specific enthalpy (deep minus shallow) [J/kg]

    Notes
    -----
    enthalpy_diff_CT(SA, CT, p) is the computationally efficient 48-term
    alternative (McDougall et al., 2011).

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: TEOS-10 manual, Eqn. (3.32.2).
    .. [2] McDougall T.J., P.M. Barker, R. Feistel and D.R. Jackett, 2011.
    """
    # Each pressure level needs its own in-situ temperature conversion.
    h_deep = enthalpy_t_exact(SA, t_from_CT(SA, CT, p_deep), p_deep)
    h_shallow = enthalpy_t_exact(SA, t_from_CT(SA, CT, p_shallow),
                                 p_shallow)
    return h_deep - h_shallow
def dynamic_enthalpy_CT_exact(SA, CT, p):
    r"""Calculate the dynamic enthalpy of seawater from Absolute Salinity,
    Conservative Temperature and pressure, using the full Gibbs function.
    Dynamic enthalpy is enthalpy minus potential enthalpy (Young, 2010).

    Parameters
    ----------
    SA : array_like
        Absolute Salinity [g/kg]
    CT : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        sea pressure [dbar]

    Returns
    -------
    dynamic_enthalpy_CT_exact : array_like
        dynamic enthalpy [J/kg]

    Notes
    -----
    dynamic_enthalpy(SA, CT, p) is the computationally efficient 48-term
    alternative (McDougall et al., 2011).

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: TEOS-10 manual, appendix A.30.
    .. [2] McDougall T.J., P.M. Barker, R. Feistel and D.R. Jackett, 2011.
    .. [3] Young, W.R., 2010: Dynamic enthalpy, Conservative Temperature,
       and the seawater Boussinesq approximation. JPO, 40, 394-400.
    """
    # Dynamic enthalpy = h - cp0 * CT (potential enthalpy is cp0 * CT).
    h = enthalpy_t_exact(SA, t_from_CT(SA, CT, p), p)
    return h - cp0 * CT
@match_args_return
def SA_from_rho_CT_exact(rho, CT, p):
    r"""Calculates the Absolute Salinity of a seawater sample, for given
    values of its density, Conservative Temperature and sea pressure.

    Parameters
    ----------
    rho : array_like
        density of a seawater sample [kg/m**3]
        This input has not had 1000 kg/m^3 subtracted from it
        (e.g. 1026 kg m**-3), that is, it is density, NOT density anomaly.
    CT : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]
    p : array_like
        sea pressure [dbar]

    Returns
    -------
    SA : array_like
        Absolute Salinity [g/kg]; masked where no solution exists in
        the valid range 0 <= SA <= 120 g/kg.

    Notes
    -----
    This function uses the full Gibbs function.  SA_from_rho_CT(rho, CT, p)
    is the computationally efficient 48-term alternative (McDougall et
    al., 2011).  After two iterations of the modified Newton-Raphson
    scheme below, the error in SA is no larger than 8x10^-13 g/kg, which
    is machine precision for this calculation.

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: TEOS-10 manual, section 2.5.
    .. [2] Millero, F. J., R. Feistel, D. G. Wright, and T. J. McDougall,
       2008: The composition of Standard Seawater..., DSR I, 55, 50-72.

    Modifications:
    2011-04-05. Trevor McDougall and Paul Barker.
    """
    v_lab = 1. / rho
    # Bracket the solution with specific volumes at SA = 0 and SA = 120.
    v_0 = specvol_CT_exact(np.zeros_like(rho), CT, p)
    v_120 = specvol_CT_exact(120 * np.ones_like(rho), CT, p)
    SA = 120 * (v_lab - v_0) / (v_120 - v_0)  # Initial estimate of SA.
    Ior = (SA < 0) | (SA > 120)
    # FIX: np.NaN alias was removed in NumPy 2.0; np.nan is identical.
    SA[Ior] = np.nan
    v_SA = (v_120 - v_0) / 120  # Initial v_SA estimate (SA derivative of v).
    # Modified Newton-Raphson iterative procedure (McDougall & Wotherspoon).
    for _ in range(3):
        SA_old = SA
        delta_v = specvol_CT_exact(SA_old, CT, p) - v_lab
        SA = SA_old - delta_v / v_SA  # Half way through the mod. N-R method.
        SA_mean = 0.5 * (SA + SA_old)
        # Use a distinct name so the input ``rho`` is not clobbered.
        rho_mean, _alpha, beta = rho_alpha_beta_CT_exact(SA_mean, CT, p)
        v_SA = -beta / rho_mean  # Updated derivative of v with respect to SA.
        SA = SA_old - delta_v / v_SA
        Ior = (SA < 0) | (SA > 120)
        # NOTE(review): masked assignment assumes SA is a masked array,
        # presumably arranged by @match_args_return — confirm.
        SA[Ior] = np.ma.masked
    return SA
def CT_from_rho_exact(rho, SA, p):
    r"""Calculate the Conservative Temperature(s) of a seawater sample for
    given values of its density, Absolute Salinity and sea pressure.

    Parameters
    ----------
    rho : array_like
        density of a seawater sample [kg/m**3]
        This input has not had 1000 kg/m^3 subtracted from it
        (e.g. 1026 kg m**-3), that is, it is density, NOT density anomaly.
    SA : array_like
        Absolute Salinity [g/kg]
    p : array_like
        sea pressure [dbar]

    Returns
    -------
    CT : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]
    CT_multiple : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)]

    Notes
    -----
    At low salinities, in brackish water, there are two possible
    temperatures for a single density; both valid solutions are returned.
    If only one solution exists, the second output is set to NaN.

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: TEOS-10 manual, section 2.5.
    """
    # Solve for in-situ temperature(s) first, then convert each to CT.
    t, t_multiple = t_from_rho_exact(rho, SA, p)
    CT = CT_from_t(SA, t, p)
    CT_multiple = CT_from_t(SA, t_multiple, p)
    return CT, CT_multiple
def CT_maxdensity_exact(SA, p):
    r"""Calculate the Conservative Temperature at which the density of
    seawater is a maximum, for given Absolute Salinity SA and sea
    pressure p.

    Parameters
    ----------
    SA : array_like
        Absolute Salinity [g/kg]
    p : array_like
        sea pressure [dbar]

    Returns
    -------
    CT_maxdensity_exact : array_like
        Conservative Temperature [:math:`^\circ` C (ITS-90)] at which
        the density of seawater is a maximum for given SA and p.

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: TEOS-10 manual, section 3.42.
    """
    # Find the in-situ temperature of maximum density, then convert to CT.
    return CT_from_t(SA, t_maxdensity_exact(SA, p), p)
if __name__ == '__main__':
    # Run any doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
|
{
"content_hash": "af70dd4562a076ac40b5145f7c25b1f7",
"timestamp": "",
"source": "github",
"line_count": 856,
"max_line_length": 79,
"avg_line_length": 29.917056074766354,
"alnum_prop": 0.629270959428326,
"repo_name": "lukecampbell/python-gsw",
"id": "bf6ceefd24711772231c90a40fd4391552ccc8c7",
"size": "25634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gsw/gibbs/density_enthalpy_ct_exact.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "374861"
}
],
"symlink_target": ""
}
|
"""Utilities for exporting TensorFlow symbols to the API.
Exporting a function or a class:
To export a function or a class use tf_export decorator. For e.g.:
```python
@tf_export('foo', 'bar.foo')
def foo(...):
...
```
If a function is assigned to a variable, you can export it by calling
tf_export explicitly. For e.g.:
```python
foo = get_foo(...)
tf_export('foo', 'bar.foo')(foo)
```
Exporting a constant
```python
foo = 1
tf_export('consts.foo').export_constant(__name__, 'foo')
```
"""
import collections
import functools
import sys
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
# Canonical API family names; these are the keys of API_ATTRS / API_ATTRS_V1.
ESTIMATOR_API_NAME = 'estimator'
KERAS_API_NAME = 'keras'
TENSORFLOW_API_NAME = 'tensorflow'
# List of subpackage names used by TensorFlow components. Have to check that
# TensorFlow core repo does not export any symbols under these names.
SUBPACKAGE_NAMESPACES = [ESTIMATOR_API_NAME]

# Per-API pair of attribute names: `names` is the attribute under which a
# symbol's exported API names are stored; `constants` is the module attribute
# under which exported constants are recorded.
_Attributes = collections.namedtuple(
    'ExportedApiAttributes', ['names', 'constants'])

# Attribute values must be unique to each API.
# Attributes used for the TF 2.x API surface.
API_ATTRS = {
    TENSORFLOW_API_NAME: _Attributes(
        '_tf_api_names',
        '_tf_api_constants'),
    ESTIMATOR_API_NAME: _Attributes(
        '_estimator_api_names',
        '_estimator_api_constants'),
    KERAS_API_NAME: _Attributes(
        '_keras_api_names',
        '_keras_api_constants')
}

# Attributes used for the TF 1.x (compat.v1) API surface.
API_ATTRS_V1 = {
    TENSORFLOW_API_NAME: _Attributes(
        '_tf_api_names_v1',
        '_tf_api_constants_v1'),
    ESTIMATOR_API_NAME: _Attributes(
        '_estimator_api_names_v1',
        '_estimator_api_constants_v1'),
    KERAS_API_NAME: _Attributes(
        '_keras_api_names_v1',
        '_keras_api_constants_v1')
}
class SymbolAlreadyExposedError(Exception):
    """Raised when adding API names to symbol that already has API names."""
class InvalidSymbolNameError(Exception):
    """Raised when trying to export symbol as an invalid or unallowed name."""
# Reverse index from exported API name to the registered symbol.
_NAME_TO_SYMBOL_MAPPING = {}


def get_symbol_from_name(name):
    """Return the symbol registered under `name`, or None when unknown."""
    return _NAME_TO_SYMBOL_MAPPING.get(name)
def get_canonical_name_for_symbol(
    symbol, api_name=TENSORFLOW_API_NAME,
    add_prefix_to_v1_names=False):
    """Get canonical name for the API symbol.

    Args:
      symbol: API function or class.
      api_name: API name (tensorflow or estimator).
      add_prefix_to_v1_names: Specifies whether a name available only in V1
        should be prefixed with compat.v1.

    Returns:
      Canonical name for the API symbol (for e.g. initializers.zeros) if
      canonical name could be determined. Otherwise, returns None.
    """
    if not hasattr(symbol, '__dict__'):
        return None
    api_names_attr = API_ATTRS[api_name].names
    _, undecorated_symbol = tf_decorator.unwrap(symbol)
    if api_names_attr not in undecorated_symbol.__dict__:
        return None
    api_names = getattr(undecorated_symbol, api_names_attr)
    deprecated_api_names = undecorated_symbol.__dict__.get(
        '_tf_deprecated_api_names', [])

    canonical_name = get_canonical_name(api_names, deprecated_api_names)
    if canonical_name:
        return canonical_name

    # If there is no V2 canonical name, get V1 canonical name.
    # BUGFIX: default to [] so symbols that carry only the V2 attribute do not
    # raise AttributeError here.
    api_names_attr = API_ATTRS_V1[api_name].names
    api_names = getattr(undecorated_symbol, api_names_attr, [])
    v1_canonical_name = get_canonical_name(api_names, deprecated_api_names)
    # BUGFIX: only add the compat.v1 prefix when a V1 name actually exists;
    # previously a missing name produced the string 'compat.v1.None'.
    if v1_canonical_name and add_prefix_to_v1_names:
        return 'compat.v1.%s' % v1_canonical_name
    return v1_canonical_name
def get_canonical_name(api_names, deprecated_api_names):
    """Get preferred endpoint name.

    Args:
      api_names: API names iterable.
      deprecated_api_names: Deprecated API names iterable.

    Returns:
      Returns one of the following in decreasing preference:
      - first non-deprecated endpoint
      - first endpoint
      - None
    """
    for candidate in api_names:
        if candidate not in deprecated_api_names:
            return candidate
    # Every name is deprecated (or none exist): fall back to the first one.
    return api_names[0] if api_names else None
def get_v1_names(symbol):
    """Get a list of TF 1.* names for this symbol.

    Args:
      symbol: symbol to get API names for.

    Returns:
      List of all API names for this symbol including TensorFlow and
      Estimator names.
    """
    collected = []
    if not hasattr(symbol, '__dict__'):
        return collected
    # Look in __dict__ directly so names inherited from a base class are
    # not reported for a subclass.
    for api in (TENSORFLOW_API_NAME, ESTIMATOR_API_NAME, KERAS_API_NAME):
        attr = API_ATTRS_V1[api].names
        if attr in symbol.__dict__:
            collected.extend(getattr(symbol, attr))
    return collected
def get_v2_names(symbol):
    """Get a list of TF 2.0 names for this symbol.

    Args:
      symbol: symbol to get API names for.

    Returns:
      List of all API names for this symbol including TensorFlow and
      Estimator names.
    """
    collected = []
    if not hasattr(symbol, '__dict__'):
        return collected
    # Look in __dict__ directly so names inherited from a base class are
    # not reported for a subclass.
    for api in (TENSORFLOW_API_NAME, ESTIMATOR_API_NAME, KERAS_API_NAME):
        attr = API_ATTRS[api].names
        if attr in symbol.__dict__:
            collected.extend(getattr(symbol, attr))
    return collected
def get_v1_constants(module):
    """Get a list of TF 1.* constants in this module.

    Args:
      module: TensorFlow module.

    Returns:
      List of all API constants under the given module including TensorFlow and
      Estimator constants.
    """
    collected = []
    # Gather constants recorded for each API family that the module defines.
    for api in (TENSORFLOW_API_NAME, ESTIMATOR_API_NAME):
        attr = API_ATTRS_V1[api].constants
        if hasattr(module, attr):
            collected.extend(getattr(module, attr))
    return collected
def get_v2_constants(module):
    """Get a list of TF 2.0 constants in this module.

    Args:
      module: TensorFlow module.

    Returns:
      List of all API constants under the given module including TensorFlow and
      Estimator constants.
    """
    collected = []
    # Gather constants recorded for each API family that the module defines.
    for api in (TENSORFLOW_API_NAME, ESTIMATOR_API_NAME):
        attr = API_ATTRS[api].constants
        if hasattr(module, attr):
            collected.extend(getattr(module, attr))
    return collected
class api_export(object):  # pylint: disable=invalid-name
    """Provides ways to export symbols to the TensorFlow API."""

    def __init__(self, *args, **kwargs):  # pylint: disable=g-doc-args
        """Export under the names *args (first one is considered canonical).

        Args:
          *args: API names in dot delimited format.
          **kwargs: Optional keyed arguments.
            v1: Names for the TensorFlow V1 API. If not set, we will use V2 API
              names both for TensorFlow V1 and V2 APIs.
            overrides: List of symbols that this is overriding
              (those overrided api exports will be removed). Note: passing
              overrides has no effect on exporting a constant.
            api_name: Name of the API you want to generate (e.g. `tensorflow` or
              `estimator`). Default is `tensorflow`.
            allow_multiple_exports: Allow symbol to be exported multiple time
              under different names.
        """
        self._names = args
        # V1 names fall back to the V2 names when no explicit `v1` is given.
        self._names_v1 = kwargs.get('v1', args)
        if 'v2' in kwargs:
            raise ValueError('You passed a "v2" argument to tf_export. This is not '
                             'what you want. Pass v2 names directly as positional '
                             'arguments instead.')
        self._api_name = kwargs.get('api_name', TENSORFLOW_API_NAME)
        self._overrides = kwargs.get('overrides', [])
        self._allow_multiple_exports = kwargs.get('allow_multiple_exports', False)
        # Fail fast at decoration time if a name is outside the allowed package.
        self._validate_symbol_names()

    def _validate_symbol_names(self):
        """Validate you are exporting symbols under an allowed package.

        We need to ensure things exported by tf_export, estimator_export, etc.
        export symbols under disjoint top-level package names.

        For TensorFlow, we check that it does not export anything under
        subpackage names used by components (estimator, keras, etc.).

        For each component, we check that it exports everything under its own
        subpackage.

        Raises:
          InvalidSymbolNameError: If you try to export symbol under disallowed
            name.
        """
        all_symbol_names = set(self._names) | set(self._names_v1)
        if self._api_name == TENSORFLOW_API_NAME:
            # Core TensorFlow must not claim names reserved for components.
            for subpackage in SUBPACKAGE_NAMESPACES:
                if any(n.startswith(subpackage) for n in all_symbol_names):
                    raise InvalidSymbolNameError(
                        '@tf_export is not allowed to export symbols under %s.*' % (
                            subpackage))
        else:
            # A component may only export inside its own namespace.
            if not all(n.startswith(self._api_name) for n in all_symbol_names):
                raise InvalidSymbolNameError(
                    'Can only export symbols under package name of component. '
                    'e.g. tensorflow_estimator must export all symbols under '
                    'tf.estimator')

    def __call__(self, func):
        """Calls this decorator.

        Args:
          func: decorated symbol (function or class).

        Returns:
          The input function with _tf_api_names attribute set.

        Raises:
          SymbolAlreadyExposedError: Raised when a symbol already has API names
            and kwarg `allow_multiple_exports` not set.
        """
        api_names_attr = API_ATTRS[self._api_name].names
        api_names_attr_v1 = API_ATTRS_V1[self._api_name].names
        # Undecorate overridden names
        for f in self._overrides:
            _, undecorated_f = tf_decorator.unwrap(f)
            delattr(undecorated_f, api_names_attr)
            delattr(undecorated_f, api_names_attr_v1)

        # Record the API names on the innermost (undecorated) callable so they
        # survive other decorators wrapping `func`.
        _, undecorated_func = tf_decorator.unwrap(func)
        self.set_attr(undecorated_func, api_names_attr, self._names)
        self.set_attr(undecorated_func, api_names_attr_v1, self._names_v1)

        # Maintain the reverse index used by get_symbol_from_name().
        for name in self._names:
            _NAME_TO_SYMBOL_MAPPING[name] = func
        for name_v1 in self._names_v1:
            _NAME_TO_SYMBOL_MAPPING['compat.v1.%s' % name_v1] = func
        return func

    def set_attr(self, func, api_names_attr, names):
        # Check for an existing api. We check if attribute name is in
        # __dict__ instead of using hasattr to verify that subclasses have
        # their own _tf_api_names as opposed to just inheriting it.
        if api_names_attr in func.__dict__:
            if not self._allow_multiple_exports:
                raise SymbolAlreadyExposedError(
                    'Symbol %s is already exposed as %s.' %
                    (func.__name__, getattr(func, api_names_attr)))  # pylint: disable=protected-access
        setattr(func, api_names_attr, names)

    def export_constant(self, module_name, name):
        """Store export information for constants/string literals.

        Export information is stored in the module where constants/string
        literals are defined.

        e.g.
        ```python
        foo = 1
        bar = 2
        tf_export("consts.foo").export_constant(__name__, 'foo')
        tf_export("consts.bar").export_constant(__name__, 'bar')
        ```

        Args:
          module_name: (string) Name of the module to store constant at.
          name: (string) Current constant name.
        """
        module = sys.modules[module_name]
        api_constants_attr = API_ATTRS[self._api_name].constants
        api_constants_attr_v1 = API_ATTRS_V1[self._api_name].constants

        # Lazily create the per-module constants list, then record the pair of
        # (exported names, local constant name).
        if not hasattr(module, api_constants_attr):
            setattr(module, api_constants_attr, [])
        # pylint: disable=protected-access
        getattr(module, api_constants_attr).append(
            (self._names, name))

        if not hasattr(module, api_constants_attr_v1):
            setattr(module, api_constants_attr_v1, [])
        getattr(module, api_constants_attr_v1).append(
            (self._names_v1, name))
def kwarg_only(f):
    """A wrapper that throws away all non-kwarg arguments."""
    spec = tf_inspect.getargspec(f)

    def _keyword_only_wrapper(*args, **kwargs):
        # Positional use is an error; point the caller at the keyword names.
        if args:
            raise TypeError(
                '{f} only takes keyword args (possible keys: {kwargs}). '
                'Please pass these args as kwargs instead.'
                .format(f=f.__name__, kwargs=spec.args))
        return f(**kwargs)

    return tf_decorator.make_decorator(
        f, _keyword_only_wrapper, decorator_argspec=spec)
# Convenience decorators: api_export pre-bound to each API family's name.
tf_export = functools.partial(api_export, api_name=TENSORFLOW_API_NAME)
estimator_export = functools.partial(api_export, api_name=ESTIMATOR_API_NAME)
keras_export = functools.partial(api_export, api_name=KERAS_API_NAME)
|
{
"content_hash": "90ba466182d783a1abb28b5a3fcd3339",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 95,
"avg_line_length": 33.10941475826972,
"alnum_prop": 0.6829849369812481,
"repo_name": "Intel-Corporation/tensorflow",
"id": "ec126807620614fab011599b72b694f67fce419a",
"size": "13701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/util/tf_export.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
}
|
class Solution:
    # @param root, a tree node
    # @return a boolean
    def isSymmetric(self, root):
        """Return True when the tree is a mirror image of itself."""
        if not root:
            return True
        return self.isSym(root.left, root.right)

    def isSym(self, left, right):
        """Return True when `left` and `right` are mirror reflections."""
        if not left or not right:
            # A mirror pair requires both sides present or both absent.
            return (not left) and (not right)
        if left.val != right.val:
            return False
        # Outer children pair with each other, and so do inner children.
        return (self.isSym(left.left, right.right) and
                self.isSym(left.right, right.left))
|
{
"content_hash": "e1d39da3b19b2df2fa32265220c244e6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 88,
"avg_line_length": 30.526315789473685,
"alnum_prop": 0.5448275862068965,
"repo_name": "pikeszfish/Leetcode.py",
"id": "68a9f5580228eb681a3f920d661a1b294a30db55",
"size": "748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode.py/SymmetricTree.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22791"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import sys
from thrift.util.Recursive import fix_spec
from thrift.Thrift import TType, TMessageType, TPriority, TRequestContext, TProcessorEventHandler, TServerInterface, TProcessor, TException, TApplicationException, UnimplementedTypedef
from thrift.protocol.TProtocol import TProtocolException
from json import loads
import sys
if sys.version_info[0] >= 3:
long = int
import thrift.annotation.cpp.ttypes
from .ttypes import UTF8STRINGS, MyEnum, MyStructNestedAnnotation, MyUnion, MyException, MyStruct, SecretStruct, MyId
# Thrift-generated module constant: a MyStruct populated with the fixture's
# declared default values. Do not edit by hand; regenerate from the .thrift
# definition instead.
myStruct = MyStruct(**{
    "major" : 42,
    "package" : "package",
    "my_enum" : 2,
})
|
{
"content_hash": "4be85ca9d76c695aedaff2866b2cb653",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 184,
"avg_line_length": 29.90909090909091,
"alnum_prop": 0.7826747720364742,
"repo_name": "facebook/fbthrift",
"id": "f0d29b26f382455140d058cb0f182855c5e31da1",
"size": "772",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "thrift/compiler/test/fixtures/basic-annotations/gen-py/module/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15608"
},
{
"name": "C++",
"bytes": "10658844"
},
{
"name": "CMake",
"bytes": "147347"
},
{
"name": "CSS",
"bytes": "4028"
},
{
"name": "Cython",
"bytes": "339005"
},
{
"name": "Emacs Lisp",
"bytes": "11229"
},
{
"name": "Go",
"bytes": "447092"
},
{
"name": "Hack",
"bytes": "313122"
},
{
"name": "Java",
"bytes": "1990062"
},
{
"name": "JavaScript",
"bytes": "38872"
},
{
"name": "Mustache",
"bytes": "1269560"
},
{
"name": "Python",
"bytes": "1623026"
},
{
"name": "Ruby",
"bytes": "6111"
},
{
"name": "Rust",
"bytes": "283392"
},
{
"name": "Shell",
"bytes": "6615"
},
{
"name": "Thrift",
"bytes": "1859041"
},
{
"name": "Vim Script",
"bytes": "2887"
}
],
"symlink_target": ""
}
|
# Error codes for the Tencent Cloud SCF (Serverless Cloud Function) API.
# Comments translated from the original Chinese descriptions.

# Operation failed.
FAILEDOPERATION = 'FailedOperation'

# Failed to create the API Gateway trigger.
FAILEDOPERATION_APIGATEWAY = 'FailedOperation.ApiGateway'

# Failed to create the trigger.
FAILEDOPERATION_APIGW = 'FailedOperation.Apigw'

# Failed to get the APM InstanceId.
FAILEDOPERATION_APMCONFIGINSTANCEID = 'FailedOperation.ApmConfigInstanceId'

# The current async event status does not support this operation; try again later.
FAILEDOPERATION_ASYNCEVENTSTATUS = 'FailedOperation.AsyncEventStatus'

# Login information verification failed; token verification failed.
FAILEDOPERATION_AUTHFAILURE = 'FailedOperation.AuthFailure'

# Failed to request role information.
FAILEDOPERATION_CALLROLEFAILED = 'FailedOperation.CallRoleFailed'

# Invalid CopyAsyncRun parameters.
FAILEDOPERATION_COPYASYNCRUN = 'FailedOperation.CopyAsyncRun'

# Failed to copy the function.
FAILEDOPERATION_COPYFAILED = 'FailedOperation.CopyFailed'

# Copying to this region is not supported.
FAILEDOPERATION_COPYFUNCTION = 'FailedOperation.CopyFunction'

# Failed to operate on the COS resource.
FAILEDOPERATION_COS = 'FailedOperation.Cos'

# Failed to create the alias.
FAILEDOPERATION_CREATEALIAS = 'FailedOperation.CreateAlias'

# Operation failed.
FAILEDOPERATION_CREATEFUNCTION = 'FailedOperation.CreateFunction'

# Failed to create the namespace.
FAILEDOPERATION_CREATENAMESPACE = 'FailedOperation.CreateNamespace'

# This operation cannot be performed in the current function status.
FAILEDOPERATION_CREATETRIGGER = 'FailedOperation.CreateTrigger'

# This operation cannot be performed in the current debug status.
FAILEDOPERATION_DEBUGMODESTATUS = 'FailedOperation.DebugModeStatus'

# The execution timeout cannot be updated while in debug mode.
FAILEDOPERATION_DEBUGMODEUPDATETIMEOUTFAIL = 'FailedOperation.DebugModeUpdateTimeOutFail'

# Failed to delete the alias.
FAILEDOPERATION_DELETEALIAS = 'FailedOperation.DeleteAlias'

# This operation cannot be performed in the current function status; try again when the status is normal.
FAILEDOPERATION_DELETEFUNCTION = 'FailedOperation.DeleteFunction'

# Failed to delete the layer version.
FAILEDOPERATION_DELETELAYERVERSION = 'FailedOperation.DeleteLayerVersion'

# The default namespace cannot be deleted.
FAILEDOPERATION_DELETENAMESPACE = 'FailedOperation.DeleteNamespace'

# Failed to delete the trigger.
FAILEDOPERATION_DELETETRIGGER = 'FailedOperation.DeleteTrigger'

# The function code cannot be updated in the current status; update when the status is normal.
FAILEDOPERATION_FUNCTIONNAMESTATUSERROR = 'FailedOperation.FunctionNameStatusError'

# The function is being deployed; this operation cannot be performed.
FAILEDOPERATION_FUNCTIONSTATUSERROR = 'FailedOperation.FunctionStatusError'

# This operation cannot be performed in the current function version status; try again when the version status is normal.
FAILEDOPERATION_FUNCTIONVERSIONSTATUSNOTACTIVE = 'FailedOperation.FunctionVersionStatusNotActive'

# Failed to get alias information.
FAILEDOPERATION_GETALIAS = 'FailedOperation.GetAlias'

# Failed to get the function code address.
FAILEDOPERATION_GETFUNCTIONADDRESS = 'FailedOperation.GetFunctionAddress'

# InstanceNotFound: the instance does not exist.
FAILEDOPERATION_INSTANCENOTFOUND = 'FailedOperation.InstanceNotFound'

# The current account or namespace is in arrears; try again when it is available.
FAILEDOPERATION_INSUFFICIENTBALANCE = 'FailedOperation.InsufficientBalance'

# Failed to invoke the function.
FAILEDOPERATION_INVOKEFUNCTION = 'FailedOperation.InvokeFunction'

# The namespace already exists; do not create it again.
FAILEDOPERATION_NAMESPACE = 'FailedOperation.Namespace'

# Failed to activate the service.
FAILEDOPERATION_OPENSERVICE = 'FailedOperation.OpenService'

# Operation conflict.
FAILEDOPERATION_OPERATIONCONFLICT = 'FailedOperation.OperationConflict'

# Failed to create the scheduled provisioned concurrency task.
FAILEDOPERATION_PROVISIONCREATETIMER = 'FailedOperation.ProvisionCreateTimer'

# Failed to delete the scheduled provisioned concurrency task.
FAILEDOPERATION_PROVISIONDELETETIMER = 'FailedOperation.ProvisionDeleteTimer'

# Provisioned concurrency exceeds the available amount.
FAILEDOPERATION_PROVISIONEDEXCEEDAVAILABLE = 'FailedOperation.ProvisionedExceedAvailable'

# Provisioned concurrency exceeds the reserved limit.
FAILEDOPERATION_PROVISIONEDEXCEEDRESERVED = 'FailedOperation.ProvisionedExceedReserved'

# A provisioned concurrency task is already in progress for this function version; try again later.
FAILEDOPERATION_PROVISIONEDINPROGRESS = 'FailedOperation.ProvisionedInProgress'

# Failed to publish the layer version.
FAILEDOPERATION_PUBLISHLAYERVERSION = 'FailedOperation.PublishLayerVersion'

# A version cannot be published in the current function status; publish when the status is normal.
FAILEDOPERATION_PUBLISHVERSION = 'FailedOperation.PublishVersion'

# The role does not exist.
FAILEDOPERATION_QCSROLENOTFOUND = 'FailedOperation.QcsRoleNotFound'

# ReservedExceedTotal: total reserved concurrency exceeds the limit.
FAILEDOPERATION_RESERVEDEXCEEDTOTAL = 'FailedOperation.ReservedExceedTotal'

# A reserved concurrency configuration task is already in progress for this function; try again later.
FAILEDOPERATION_RESERVEDINPROGRESS = 'FailedOperation.ReservedInProgress'

# ServiceClosed: please confirm before performing the operation.
FAILEDOPERATION_SERVICECLOSED = 'FailedOperation.ServiceClosed'

# The topic does not exist.
FAILEDOPERATION_TOPICNOTEXIST = 'FailedOperation.TopicNotExist'

# A user concurrency memory quota configuration task is in progress; try again later.
FAILEDOPERATION_TOTALCONCURRENCYMEMORYINPROGRESS = 'FailedOperation.TotalConcurrencyMemoryInProgress'

# The specified service is not activated; submit a ticket to request activation.
FAILEDOPERATION_UNOPENEDSERVICE = 'FailedOperation.UnOpenedService'

# Failed to update the alias.
FAILEDOPERATION_UPDATEALIAS = 'FailedOperation.UpdateAlias'

# The function code cannot be updated in the current status; update when the status is normal.
FAILEDOPERATION_UPDATEFUNCTIONCODE = 'FailedOperation.UpdateFunctionCode'

# The UpdateFunctionConfiguration operation failed.
FAILEDOPERATION_UPDATEFUNCTIONCONFIGURATION = 'FailedOperation.UpdateFunctionConfiguration'

# Internal error.
INTERNALERROR = 'InternalError'

# Internal error while creating the API Gateway trigger.
INTERNALERROR_APIGATEWAY = 'InternalError.ApiGateway'

# CKafka API call failed.
INTERNALERROR_CKAFKA = 'InternalError.Ckafka'

# Failed to delete the CMQ trigger.
INTERNALERROR_CMQ = 'InternalError.Cmq'

# Failed to update the trigger.
INTERNALERROR_COS = 'InternalError.Cos'

# ES error.
INTERNALERROR_ES = 'InternalError.ES'

# Internal service exception.
INTERNALERROR_EXCEPTION = 'InternalError.Exception'

# Internal service error.
INTERNALERROR_GETROLEERROR = 'InternalError.GetRoleError'

# Internal system error.
INTERNALERROR_SYSTEM = 'InternalError.System'

# Internal service error.
INTERNALERROR_SYSTEMERROR = 'InternalError.SystemError'

# The FunctionName value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETER_FUNCTIONNAME = 'InvalidParameter.FunctionName'

# Invalid parameters when creating the function.
INVALIDPARAMETER_PARAMERROR = 'InvalidParameter.ParamError'

# Invalid request parameter.
INVALIDPARAMETER_PAYLOAD = 'InvalidParameter.Payload'

# The request size exceeds the limit.
INVALIDPARAMETER_REQUESTTOOLARGE = 'InvalidParameter.RequestTooLarge'

# Invalid RoleCheck parameters.
INVALIDPARAMETER_ROLECHECK = 'InvalidParameter.RoleCheck'

# Invalid RoutingConfig parameter.
INVALIDPARAMETER_ROUTINGCONFIG = 'InvalidParameter.RoutingConfig'

# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'

# The Action value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ACTION = 'InvalidParameterValue.Action'

# Invalid AdditionalVersionWeights parameter.
INVALIDPARAMETERVALUE_ADDITIONALVERSIONWEIGHTS = 'InvalidParameterValue.AdditionalVersionWeights'

# Deleting the default alias is not supported; fix and try again.
INVALIDPARAMETERVALUE_ALIAS = 'InvalidParameterValue.Alias'

# Invalid ApiGateway parameter.
INVALIDPARAMETERVALUE_APIGATEWAY = 'InvalidParameterValue.ApiGateway'

# Invalid ApmConfig parameter.
INVALIDPARAMETERVALUE_APMCONFIG = 'InvalidParameterValue.ApmConfig'

# Invalid ApmConfigInstanceId parameter.
INVALIDPARAMETERVALUE_APMCONFIGINSTANCEID = 'InvalidParameterValue.ApmConfigInstanceId'

# Invalid ApmConfigRegion parameter.
INVALIDPARAMETERVALUE_APMCONFIGREGION = 'InvalidParameterValue.ApmConfigRegion'

# Invalid Args value.
INVALIDPARAMETERVALUE_ARGS = 'InvalidParameterValue.Args'

# Invalid AsyncRunEnable value.
INVALIDPARAMETERVALUE_ASYNCRUNENABLE = 'InvalidParameterValue.AsyncRunEnable'

# Invalid function async retry configuration parameter.
INVALIDPARAMETERVALUE_ASYNCTRIGGERCONFIG = 'InvalidParameterValue.AsyncTriggerConfig'

# Invalid Cdn input.
INVALIDPARAMETERVALUE_CDN = 'InvalidParameterValue.Cdn'

# Duplicate CFS configuration item.
INVALIDPARAMETERVALUE_CFSPARAMETERDUPLICATE = 'InvalidParameterValue.CfsParameterDuplicate'

# The CFS configuration value does not conform to the specification.
INVALIDPARAMETERVALUE_CFSPARAMETERERROR = 'InvalidParameterValue.CfsParameterError'

# The CFS parameter format does not conform to the specification.
INVALIDPARAMETERVALUE_CFSSTRUCTIONERROR = 'InvalidParameterValue.CfsStructionError'

# Invalid Ckafka input.
INVALIDPARAMETERVALUE_CKAFKA = 'InvalidParameterValue.Ckafka'

# Invalid parameters passed when running the function.
INVALIDPARAMETERVALUE_CLIENTCONTEXT = 'InvalidParameterValue.ClientContext'

# Invalid Cls input.
INVALIDPARAMETERVALUE_CLS = 'InvalidParameterValue.Cls'

# Modifying the CLS configuration requires the Role parameter; fix and try again.
INVALIDPARAMETERVALUE_CLSROLE = 'InvalidParameterValue.ClsRole'

# Invalid Cmq input.
INVALIDPARAMETERVALUE_CMQ = 'InvalidParameterValue.Cmq'

# Invalid Code input.
INVALIDPARAMETERVALUE_CODE = 'InvalidParameterValue.Code'

# Invalid CodeSecret input.
INVALIDPARAMETERVALUE_CODESECRET = 'InvalidParameterValue.CodeSecret'

# Invalid CodeSource input.
INVALIDPARAMETERVALUE_CODESOURCE = 'InvalidParameterValue.CodeSource'

# Invalid Command[Entrypoint] value.
INVALIDPARAMETERVALUE_COMMAND = 'InvalidParameterValue.Command'

# Invalid CompatibleRuntimes parameter.
INVALIDPARAMETERVALUE_COMPATIBLERUNTIMES = 'InvalidParameterValue.CompatibleRuntimes'

# Invalid Content parameter.
INVALIDPARAMETERVALUE_CONTENT = 'InvalidParameterValue.Content'

# Invalid Cos input.
INVALIDPARAMETERVALUE_COS = 'InvalidParameterValue.Cos'

# CosBucketName does not conform to the specification.
INVALIDPARAMETERVALUE_COSBUCKETNAME = 'InvalidParameterValue.CosBucketName'

# The CosBucketRegion value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_COSBUCKETREGION = 'InvalidParameterValue.CosBucketRegion'

# CosObjectName does not conform to the specification.
INVALIDPARAMETERVALUE_COSOBJECTNAME = 'InvalidParameterValue.CosObjectName'

# The CustomArgument parameter length exceeds the limit.
INVALIDPARAMETERVALUE_CUSTOMARGUMENT = 'InvalidParameterValue.CustomArgument'

# Invalid DateTime input.
INVALIDPARAMETERVALUE_DATETIME = 'InvalidParameterValue.DateTime'

# The DeadLetterConfig value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_DEADLETTERCONFIG = 'InvalidParameterValue.DeadLetterConfig'

# The default namespace cannot be created.
INVALIDPARAMETERVALUE_DEFAULTNAMESPACE = 'InvalidParameterValue.DefaultNamespace'

# The function template for the DemoId has an invalid code value; confirm and try again.
INVALIDPARAMETERVALUE_DEMO = 'InvalidParameterValue.Demo'

# The DemoId does not exist.
INVALIDPARAMETERVALUE_DEMOID = 'InvalidParameterValue.DemoId'

# Invalid Description input.
INVALIDPARAMETERVALUE_DESCRIPTION = 'InvalidParameterValue.Description'

# Invalid DNS[OS_NAMESERVER] environment variable configuration.
INVALIDPARAMETERVALUE_DNSINFO = 'InvalidParameterValue.DnsInfo'

# Invalid DynamicEnabled parameter.
INVALIDPARAMETERVALUE_DYNAMICENABLED = 'InvalidParameterValue.DynamicEnabled'

# Invalid EipConfig parameter.
INVALIDPARAMETERVALUE_EIPCONFIG = 'InvalidParameterValue.EipConfig'

# The Enable value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ENABLE = 'InvalidParameterValue.Enable'

# Invalid Environment input.
INVALIDPARAMETERVALUE_ENVIRONMENT = 'InvalidParameterValue.Environment'

# The environment variable size exceeds the limit; keep it within 4 KB.
INVALIDPARAMETERVALUE_ENVIRONMENTEXCEEDEDLIMIT = 'InvalidParameterValue.EnvironmentExceededLimit'

# Modifying function system and runtime environment variables is not supported.
INVALIDPARAMETERVALUE_ENVIRONMENTSYSTEMPROTECT = 'InvalidParameterValue.EnvironmentSystemProtect'

# Invalid Filters parameter.
INVALIDPARAMETERVALUE_FILTERS = 'InvalidParameterValue.Filters'

# The Function value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_FUNCTION = 'InvalidParameterValue.Function'

# The function does not exist.
INVALIDPARAMETERVALUE_FUNCTIONNAME = 'InvalidParameterValue.FunctionName'

# Invalid request id parameter.
INVALIDPARAMETERVALUE_FUNCTIONREQUESTID = 'InvalidParameterValue.FunctionRequestId'

# Invalid FunctionType parameter.
INVALIDPARAMETERVALUE_FUNCTIONTYPE = 'InvalidParameterValue.FunctionType'

# GitBranch does not conform to the specification.
INVALIDPARAMETERVALUE_GITBRANCH = 'InvalidParameterValue.GitBranch'

# The GitCommitId value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_GITCOMMITID = 'InvalidParameterValue.GitCommitId'

# GitDirectory does not conform to the specification.
INVALIDPARAMETERVALUE_GITDIRECTORY = 'InvalidParameterValue.GitDirectory'

# GitPassword does not conform to the specification.
INVALIDPARAMETERVALUE_GITPASSWORD = 'InvalidParameterValue.GitPassword'

# Invalid GitPasswordSecret parameter.
INVALIDPARAMETERVALUE_GITPASSWORDSECRET = 'InvalidParameterValue.GitPasswordSecret'

# GitUrl does not conform to the specification.
INVALIDPARAMETERVALUE_GITURL = 'InvalidParameterValue.GitUrl'

# GitUserName does not conform to the specification.
INVALIDPARAMETERVALUE_GITUSERNAME = 'InvalidParameterValue.GitUserName'

# Invalid GitUserNameSecret parameter.
INVALIDPARAMETERVALUE_GITUSERNAMESECRET = 'InvalidParameterValue.GitUserNameSecret'

# Invalid Handler input.
INVALIDPARAMETERVALUE_HANDLER = 'InvalidParameterValue.Handler'

# Invalid IdleTimeOut parameter.
INVALIDPARAMETERVALUE_IDLETIMEOUT = 'InvalidParameterValue.IdleTimeOut'

# Invalid ImageType value.
INVALIDPARAMETERVALUE_IMAGETYPE = 'InvalidParameterValue.ImageType'

# Invalid imageUri input.
INVALIDPARAMETERVALUE_IMAGEURI = 'InvalidParameterValue.ImageUri'

# Invalid InlineZipFile.
INVALIDPARAMETERVALUE_INLINEZIPFILE = 'InvalidParameterValue.InlineZipFile'

# Invalid InstanceConcurrencyConfig parameter.
INVALIDPARAMETERVALUE_INSTANCECONCURRENCYCONFIG = 'InvalidParameterValue.InstanceConcurrencyConfig'

# The InvokeType value does not conform to the specification; fix it and try again.
INVALIDPARAMETERVALUE_INVOKETYPE = 'InvalidParameterValue.InvokeType'

# The L5Enable value does not conform to the specification; fix it and try again.
INVALIDPARAMETERVALUE_L5ENABLE = 'InvalidParameterValue.L5Enable'

# Invalid LayerName parameter.
INVALIDPARAMETERVALUE_LAYERNAME = 'InvalidParameterValue.LayerName'

# Invalid Layers parameter.
INVALIDPARAMETERVALUE_LAYERS = 'InvalidParameterValue.Layers'

# Invalid Limit input.
INVALIDPARAMETERVALUE_LIMIT = 'InvalidParameterValue.Limit'

# The parameter exceeds the length limit.
INVALIDPARAMETERVALUE_LIMITEXCEEDED = 'InvalidParameterValue.LimitExceeded'

# Invalid MaxConcurrency parameter.
INVALIDPARAMETERVALUE_MAXCONCURRENCY = 'InvalidParameterValue.MaxConcurrency'

# The Memory value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_MEMORY = 'InvalidParameterValue.Memory'

# Invalid MemorySize.
INVALIDPARAMETERVALUE_MEMORYSIZE = 'InvalidParameterValue.MemorySize'

# Invalid MinCapacity parameter.
INVALIDPARAMETERVALUE_MINCAPACITY = 'InvalidParameterValue.MinCapacity'

# Invalid Name parameter.
INVALIDPARAMETERVALUE_NAME = 'InvalidParameterValue.Name'

# Invalid Namespace parameter.
INVALIDPARAMETERVALUE_NAMESPACE = 'InvalidParameterValue.Namespace'

# Invalid format: Namespace must consist of letters, digits, and the - or _ symbols, with a maximum length of 30.
INVALIDPARAMETERVALUE_NAMESPACEINVALID = 'InvalidParameterValue.NamespaceInvalid'

# Invalid NodeSpec parameter.
INVALIDPARAMETERVALUE_NODESPEC = 'InvalidParameterValue.NodeSpec'

# Invalid NodeType parameter.
INVALIDPARAMETERVALUE_NODETYPE = 'InvalidParameterValue.NodeType'

# Invalid offset.
INVALIDPARAMETERVALUE_OFFSET = 'InvalidParameterValue.Offset'

# Invalid Order input.
INVALIDPARAMETERVALUE_ORDER = 'InvalidParameterValue.Order'

# The OrderBy value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ORDERBY = 'InvalidParameterValue.OrderBy'

# The input parameter is not valid JSON.
INVALIDPARAMETERVALUE_PARAM = 'InvalidParameterValue.Param'

# Invalid ProtocolType parameter.
INVALIDPARAMETERVALUE_PROTOCOLTYPE = 'InvalidParameterValue.ProtocolType'

# Duplicate cron configuration for scheduled provisioned concurrency.
INVALIDPARAMETERVALUE_PROVISIONTRIGGERCRONCONFIGDUPLICATE = 'InvalidParameterValue.ProvisionTriggerCronConfigDuplicate'

# Invalid TriggerName parameter.
INVALIDPARAMETERVALUE_PROVISIONTRIGGERNAME = 'InvalidParameterValue.ProvisionTriggerName'

# Duplicate TriggerName.
INVALIDPARAMETERVALUE_PROVISIONTRIGGERNAMEDUPLICATE = 'InvalidParameterValue.ProvisionTriggerNameDuplicate'

# Invalid ProvisionType parameter.
INVALIDPARAMETERVALUE_PROVISIONTYPE = 'InvalidParameterValue.ProvisionType'

# Invalid PublicNetConfig parameter.
INVALIDPARAMETERVALUE_PUBLICNETCONFIG = 'InvalidParameterValue.PublicNetConfig'

# Unsupported function version.
INVALIDPARAMETERVALUE_QUALIFIER = 'InvalidParameterValue.Qualifier'

# Invalid version parameter when querying version details.
INVALIDPARAMETERVALUE_QUERYVERSION = 'InvalidParameterValue.QueryVersion'

# Invalid Enterprise Edition image instance ID [RegistryId].
INVALIDPARAMETERVALUE_REGISTRYID = 'InvalidParameterValue.RegistryId'

# Invalid RetCode.
INVALIDPARAMETERVALUE_RETCODE = 'InvalidParameterValue.RetCode'

# The RoutingConfig value does not conform to the specification; fix it and try again. See: https://tencentcs.com/5jXKFnBW.
INVALIDPARAMETERVALUE_ROUTINGCONFIG = 'InvalidParameterValue.RoutingConfig'

# Invalid Runtime input.
INVALIDPARAMETERVALUE_RUNTIME = 'InvalidParameterValue.Runtime'

# SearchKey is not Keyword, Tag, or Runtime.
INVALIDPARAMETERVALUE_SEARCHKEY = 'InvalidParameterValue.SearchKey'

# Invalid SecretInfo.
INVALIDPARAMETERVALUE_SECRETINFO = 'InvalidParameterValue.SecretInfo'

# ServiceName naming does not conform to the specification.
INVALIDPARAMETERVALUE_SERVICENAME = 'InvalidParameterValue.ServiceName'

# The Stamp value does not conform to the specification; fix it and try again.
INVALIDPARAMETERVALUE_STAMP = 'InvalidParameterValue.Stamp'

# Invalid start time.
INVALIDPARAMETERVALUE_STARTTIME = 'InvalidParameterValue.StartTime'

# Both the start date and the end date must be specified.
INVALIDPARAMETERVALUE_STARTTIMEORENDTIME = 'InvalidParameterValue.StartTimeOrEndTime'

# The Status value does not conform to the specification; fix it and try again.
INVALIDPARAMETERVALUE_STATUS = 'InvalidParameterValue.Status'

# Invalid system environment variable.
INVALIDPARAMETERVALUE_SYSTEMENVIRONMENT = 'InvalidParameterValue.SystemEnvironment'

# Invalid TempCosObjectName.
INVALIDPARAMETERVALUE_TEMPCOSOBJECTNAME = 'InvalidParameterValue.TempCosObjectName'

# The TraceEnable value does not conform to the specification; fix it and try again.
INVALIDPARAMETERVALUE_TRACEENABLE = 'InvalidParameterValue.TraceEnable'

# Invalid TrackingTarget parameter.
INVALIDPARAMETERVALUE_TRACKINGTARGET = 'InvalidParameterValue.TrackingTarget'

# Invalid TriggerCronConfig parameter.
INVALIDPARAMETERVALUE_TRIGGERCRONCONFIG = 'InvalidParameterValue.TriggerCronConfig'

# The scheduled trigger interval in TriggerCronConfig is less than the specified value.
INVALIDPARAMETERVALUE_TRIGGERCRONCONFIGTIMEINTERVAL = 'InvalidParameterValue.TriggerCronConfigTimeInterval'

# Invalid TriggerDesc parameter.
INVALIDPARAMETERVALUE_TRIGGERDESC = 'InvalidParameterValue.TriggerDesc'

# Invalid TriggerName input.
INVALIDPARAMETERVALUE_TRIGGERNAME = 'InvalidParameterValue.TriggerName'

# Invalid TriggerProvisionedConcurrencyNum parameter.
INVALIDPARAMETERVALUE_TRIGGERPROVISIONEDCONCURRENCYNUM = 'InvalidParameterValue.TriggerProvisionedConcurrencyNum'

# Invalid Type input.
INVALIDPARAMETERVALUE_TYPE = 'InvalidParameterValue.Type'

# VPC must be enabled when the CFS configuration is enabled.
INVALIDPARAMETERVALUE_VPCNOTSETWHENOPENCFS = 'InvalidParameterValue.VpcNotSetWhenOpenCfs'

# Invalid WebSocketsParams parameter.
INVALIDPARAMETERVALUE_WEBSOCKETSPARAMS = 'InvalidParameterValue.WebSocketsParams'

# The file is not a valid ZIP file; re-compress and try again.
INVALIDPARAMETERVALUE_ZIPFILE = 'InvalidParameterValue.ZipFile'

# Base64 decoding of the compressed file failed: `Incorrect padding`; fix and try again.
INVALIDPARAMETERVALUE_ZIPFILEBASE64BINASCIIERROR = 'InvalidParameterValue.ZipFileBase64BinasciiError'

# The number of aliases exceeds the maximum limit.
LIMITEXCEEDED_ALIAS = 'LimitExceeded.Alias'

# CDN usage exceeds the maximum limit.
LIMITEXCEEDED_CDN = 'LimitExceeded.Cdn'

# The number of function versions with image acceleration enabled exceeds the limit.
LIMITEXCEEDED_CONTAINERIMAGEACCELERATEQUOTA = 'LimitExceeded.ContainerImageAccelerateQuota'

# EIP resources exceed the limit.
LIMITEXCEEDED_EIP = 'LimitExceeded.Eip'

# The number of functions exceeds the maximum limit; submit a ticket (https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex) to request a higher limit.
LIMITEXCEEDED_FUNCTION = 'LimitExceeded.Function'

# The number of functions under the same topic exceeds the maximum limit.
LIMITEXCEEDED_FUNCTIONONTOPIC = 'LimitExceeded.FunctionOnTopic'

# FunctionProvisionedConcurrencyMemory has reached the limit; submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_FUNCTIONPROVISIONEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionProvisionedConcurrencyMemory'

# The function reserved concurrency memory exceeds the limit.
LIMITEXCEEDED_FUNCTIONRESERVEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionReservedConcurrencyMemory'

# FunctionTotalProvisionedConcurrencyMemory has reached the limit; submit a ticket to request a higher limit: https://tencentcs.com/7Fixwt63.
LIMITEXCEEDED_FUNCTIONTOTALPROVISIONEDCONCURRENCYMEMORY = 'LimitExceeded.FunctionTotalProvisionedConcurrencyMemory'

# The total provisioned concurrency of the function has reached the limit.
LIMITEXCEEDED_FUNCTIONTOTALPROVISIONEDCONCURRENCYNUM = 'LimitExceeded.FunctionTotalProvisionedConcurrencyNum'
# InitTimeout达到限制,可提交工单申请提升限制:https://tencentcs.com/7Fixwt63。
LIMITEXCEEDED_INITTIMEOUT = 'LimitExceeded.InitTimeout'
# layer版本数量超出最大限制。
LIMITEXCEEDED_LAYERVERSIONS = 'LimitExceeded.LayerVersions'
# layer数量超出最大限制。
LIMITEXCEEDED_LAYERS = 'LimitExceeded.Layers'
# 动态扩容最大值超限。
LIMITEXCEEDED_MAXCAPACITY = 'LimitExceeded.MaxCapacity'
# 内存超出最大限制。
LIMITEXCEEDED_MEMORY = 'LimitExceeded.Memory'
# 函数异步重试配置消息保留时间超过限制。
LIMITEXCEEDED_MSGTTL = 'LimitExceeded.MsgTTL'
# 命名空间数量超过最大限制,可通过[提交工单](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex)申请提升限制。
LIMITEXCEEDED_NAMESPACE = 'LimitExceeded.Namespace'
# Offset超出限制。
LIMITEXCEEDED_OFFSET = 'LimitExceeded.Offset'
# 定时预置数量超过最大限制。
LIMITEXCEEDED_PROVISIONTRIGGERACTION = 'LimitExceeded.ProvisionTriggerAction'
# 定时触发间隔小于最大限制。
LIMITEXCEEDED_PROVISIONTRIGGERINTERVAL = 'LimitExceeded.ProvisionTriggerInterval'
# 配额超限。
LIMITEXCEEDED_QUOTA = 'LimitExceeded.Quota'
# 函数异步重试配置异步重试次数超过限制。
LIMITEXCEEDED_RETRYNUM = 'LimitExceeded.RetryNum'
# Timeout超出最大限制。
LIMITEXCEEDED_TIMEOUT = 'LimitExceeded.Timeout'
# 用户并发内存配额超限。
LIMITEXCEEDED_TOTALCONCURRENCYMEMORY = 'LimitExceeded.TotalConcurrencyMemory'
# 触发器数量超出最大限制,可通过[提交工单](https://cloud.tencent.com/act/event/Online_service?from=scf%7Cindex)申请提升限制。
LIMITEXCEEDED_TRIGGER = 'LimitExceeded.Trigger'
# UserTotalConcurrencyMemory达到限制,可提交工单申请提升限制:https://tencentcs.com/7Fixwt63。
LIMITEXCEEDED_USERTOTALCONCURRENCYMEMORY = 'LimitExceeded.UserTotalConcurrencyMemory'
# 缺少参数错误。
MISSINGPARAMETER = 'MissingParameter'
# Code没有传入。
MISSINGPARAMETER_CODE = 'MissingParameter.Code'
# 缺失 Runtime 字段。
MISSINGPARAMETER_RUNTIME = 'MissingParameter.Runtime'
# 资源被占用。
RESOURCEINUSE = 'ResourceInUse'
# Alias已被占用。
RESOURCEINUSE_ALIAS = 'ResourceInUse.Alias'
# Cdn已被占用。
RESOURCEINUSE_CDN = 'ResourceInUse.Cdn'
# Cmq已被占用。
RESOURCEINUSE_CMQ = 'ResourceInUse.Cmq'
# Cos已被占用。
RESOURCEINUSE_COS = 'ResourceInUse.Cos'
# 函数已存在。
RESOURCEINUSE_FUNCTION = 'ResourceInUse.Function'
# FunctionName已存在。
RESOURCEINUSE_FUNCTIONNAME = 'ResourceInUse.FunctionName'
# Layer版本正在使用中。
RESOURCEINUSE_LAYERVERSION = 'ResourceInUse.LayerVersion'
# Namespace已存在。
RESOURCEINUSE_NAMESPACE = 'ResourceInUse.Namespace'
# TriggerName已存在。
RESOURCEINUSE_TRIGGER = 'ResourceInUse.Trigger'
# TriggerName已存在。
RESOURCEINUSE_TRIGGERNAME = 'ResourceInUse.TriggerName'
# COS资源不足。
RESOURCEINSUFFICIENT_COS = 'ResourceInsufficient.COS'
# 资源不存在。
RESOURCENOTFOUND = 'ResourceNotFound'
# 别名不存在。
RESOURCENOTFOUND_ALIAS = 'ResourceNotFound.Alias'
# 未找到指定的AsyncEvent,请创建后再试。
RESOURCENOTFOUND_ASYNCEVENT = 'ResourceNotFound.AsyncEvent'
# Cdn不存在。
RESOURCENOTFOUND_CDN = 'ResourceNotFound.Cdn'
# 指定的cfs下未找到您所指定的挂载点。
RESOURCENOTFOUND_CFSMOUNTINSNOTMATCH = 'ResourceNotFound.CfsMountInsNotMatch'
# CfsProtocolError 参数异常。
RESOURCENOTFOUND_CFSPROTOCOLERROR = 'ResourceNotFound.CfsProtocolError'
# 检测cfs状态为不可用。
RESOURCENOTFOUND_CFSSTATUSERROR = 'ResourceNotFound.CfsStatusError'
# cfs与云函数所处vpc不一致。
RESOURCENOTFOUND_CFSVPCNOTMATCH = 'ResourceNotFound.CfsVpcNotMatch'
# Ckafka不存在。
RESOURCENOTFOUND_CKAFKA = 'ResourceNotFound.Ckafka'
# Cmq不存在。
RESOURCENOTFOUND_CMQ = 'ResourceNotFound.Cmq'
# Cos不存在。
RESOURCENOTFOUND_COS = 'ResourceNotFound.Cos'
# 不存在的Demo。
RESOURCENOTFOUND_DEMO = 'ResourceNotFound.Demo'
# 函数不存在。
RESOURCENOTFOUND_FUNCTION = 'ResourceNotFound.Function'
# 函数不存在。
RESOURCENOTFOUND_FUNCTIONNAME = 'ResourceNotFound.FunctionName'
# 函数版本不存在。
RESOURCENOTFOUND_FUNCTIONVERSION = 'ResourceNotFound.FunctionVersion'
# 获取cfs挂载点信息错误。
RESOURCENOTFOUND_GETCFSMOUNTINSERROR = 'ResourceNotFound.GetCfsMountInsError'
# 获取cfs信息错误。
RESOURCENOTFOUND_GETCFSNOTMATCH = 'ResourceNotFound.GetCfsNotMatch'
# 未找到指定的ImageConfig,请创建后再试。
RESOURCENOTFOUND_IMAGECONFIG = 'ResourceNotFound.ImageConfig'
# layer不存在。
RESOURCENOTFOUND_LAYER = 'ResourceNotFound.Layer'
# Layer版本不存在。
RESOURCENOTFOUND_LAYERVERSION = 'ResourceNotFound.LayerVersion'
# Namespace不存在。
RESOURCENOTFOUND_NAMESPACE = 'ResourceNotFound.Namespace'
# 版本不存在。
RESOURCENOTFOUND_QUALIFIER = 'ResourceNotFound.Qualifier'
# 角色不存在。
RESOURCENOTFOUND_ROLE = 'ResourceNotFound.Role'
# Role不存在。
RESOURCENOTFOUND_ROLECHECK = 'ResourceNotFound.RoleCheck'
# Timer不存在。
RESOURCENOTFOUND_TIMER = 'ResourceNotFound.Timer'
# 并发内存配额资源未找到。
RESOURCENOTFOUND_TOTALCONCURRENCYMEMORY = 'ResourceNotFound.TotalConcurrencyMemory'
# 触发器不存在。
RESOURCENOTFOUND_TRIGGER = 'ResourceNotFound.Trigger'
# 版本不存在。
RESOURCENOTFOUND_VERSION = 'ResourceNotFound.Version'
# VPC或子网不存在。
RESOURCENOTFOUND_VPC = 'ResourceNotFound.Vpc'
# 余额不足,请先充值。
RESOURCEUNAVAILABLE_INSUFFICIENTBALANCE = 'ResourceUnavailable.InsufficientBalance'
# Namespace不可用。
RESOURCEUNAVAILABLE_NAMESPACE = 'ResourceUnavailable.Namespace'
# 未授权操作。
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
# CAM鉴权失败。
UNAUTHORIZEDOPERATION_CAM = 'UnauthorizedOperation.CAM'
# 无访问代码权限。
UNAUTHORIZEDOPERATION_CODESECRET = 'UnauthorizedOperation.CodeSecret'
# 没有权限。
UNAUTHORIZEDOPERATION_CREATETRIGGER = 'UnauthorizedOperation.CreateTrigger'
# 没有权限的操作。
UNAUTHORIZEDOPERATION_DELETEFUNCTION = 'UnauthorizedOperation.DeleteFunction'
# 没有权限。
UNAUTHORIZEDOPERATION_DELETETRIGGER = 'UnauthorizedOperation.DeleteTrigger'
# 不是从控制台调用的该接口。
UNAUTHORIZEDOPERATION_NOTMC = 'UnauthorizedOperation.NotMC'
# Region错误。
UNAUTHORIZEDOPERATION_REGION = 'UnauthorizedOperation.Region'
# 没有权限访问您的Cos资源。
UNAUTHORIZEDOPERATION_ROLE = 'UnauthorizedOperation.Role'
# TempCos的Appid和请求账户的APPID不一致。
UNAUTHORIZEDOPERATION_TEMPCOSAPPID = 'UnauthorizedOperation.TempCosAppid'
# 无法进行此操作。
UNAUTHORIZEDOPERATION_UPDATEFUNCTIONCODE = 'UnauthorizedOperation.UpdateFunctionCode'
# 操作不支持。
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
# 资源还有别名绑定,不支持当前操作,请解绑别名后重试。
UNSUPPORTEDOPERATION_ALIASBIND = 'UnsupportedOperation.AliasBind'
# 指定的配置AsyncRunEnable暂不支持,请修正后再试。
UNSUPPORTEDOPERATION_ASYNCRUNENABLE = 'UnsupportedOperation.AsyncRunEnable'
# Cdn不支持。
UNSUPPORTEDOPERATION_CDN = 'UnsupportedOperation.Cdn'
# Cos操作不支持。
UNSUPPORTEDOPERATION_COS = 'UnsupportedOperation.Cos'
# 指定的配置EipFixed暂不支持。
UNSUPPORTEDOPERATION_EIPFIXED = 'UnsupportedOperation.EipFixed'
# 不支持此地域。
UNSUPPORTEDOPERATION_REGION = 'UnsupportedOperation.Region'
# Trigger操作不支持。
UNSUPPORTEDOPERATION_TRIGGER = 'UnsupportedOperation.Trigger'
# 指定的配置暂不支持,请修正后再试。
UNSUPPORTEDOPERATION_UPDATEFUNCTIONEVENTINVOKECONFIG = 'UnsupportedOperation.UpdateFunctionEventInvokeConfig'
# 指定的配置VpcConfig暂不支持。
UNSUPPORTEDOPERATION_VPCCONFIG = 'UnsupportedOperation.VpcConfig'
|
{
"content_hash": "a5bcd6a3a8fbfc80dd8e1da515dcd47d",
"timestamp": "",
"source": "github",
"line_count": 820,
"max_line_length": 119,
"avg_line_length": 29.76951219512195,
"alnum_prop": 0.8446192290360903,
"repo_name": "tzpBingo/github-trending",
"id": "02daac0195c759a79be57085148ca6cb767ff6d2",
"size": "30270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codespace/python/tencentcloud/scf/v20180416/errorcodes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "11470"
},
{
"name": "HTML",
"bytes": "1543"
},
{
"name": "Python",
"bytes": "49985109"
},
{
"name": "Shell",
"bytes": "18039"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
"""
"""
__author__ = "Jerome Samson"
__copyright__ = "Copyright 2015, Mikros Image"
import logging
from datetime import datetime
try:
import simplejson as json
except ImportError:
import json
from puliclient.server.server import Server, RequestError, RequestTimeoutError
from puliclient.server.server import request
class RenderNode(object):
    '''
    Client-side representation of a render node (worker).

    Attribute values can be filled either from a plain dict (``rnDict``
    given to the constructor) or copied from another node-like object via
    :meth:`createFromNode`.
    '''

    #
    # Private
    #
    def __init__(self, rnDict=None):
        '''
        :param rnDict: optional dict of attribute values; matching attributes
                       are overwritten, unknown keys are silently ignored
        '''
        # Sys infos
        self.id = 0
        self.name = ""
        self.coresNumber = 0
        self.ramSize = ""
        self.speed = 0

        # Dynamic sys infos
        self.systemFreeRam = 0
        self.systemSwapPercentage = 0

        # Worker state
        self.puliversion = ""
        self.commands = {}
        self.status = 0
        self.host = ""
        self.port = 0
        # self.pools = []
        self.caracteristics = {}
        self.performance = 0.0
        self.excluded = False

        # Timers
        self.createDate = 0
        self.registerDate = 0
        self.lastAliveTime = 0

        if rnDict:
            self._createFromDict(rnDict)

        # workerHistory (list state changes and user actions)
        # commandHistory

    def __repr__(self):
        return "RenderNode(%s)" % self.name

    def __str__(self):
        return "%s" % self.name

    def encode(self, indent=0):
        '''
        Return a plain dict of all instance attributes (suitable for JSON
        encoding, provided the attribute values themselves are).

        :param indent: unused, kept for backward compatibility
        '''
        res = {}
        for field in self.__dict__:
            res[field] = getattr(self, field)
        return res

    def _createFromDict(self, rnDict):
        """
        Copy the values of rnDict onto attributes of the same name; keys that
        do not match an existing attribute are ignored.

        :param rnDict: dict of attribute name -> value
        :return boolean: Indicating success
        """
        # FIX: use items() instead of the python2-only iteritems() so this
        # also runs under python 3 (items() iterates identically on both).
        for key, val in rnDict.items():
            if hasattr(self, key):
                setattr(self, key, val)

        # Specific transformation
        # self.speed = rnDict.get("createDate")
        # self.createDate = datetime.fromtimestamp(rnDict.get("createDate"))
        # self.registerDate = datetime.fromtimestamp(rnDict.get("registerDate"))
        # self.lastAliveTime = datetime.fromtimestamp(rnDict.get("lastAliveTime"))
        return True

    def createFromNode(self, node):
        '''Copy every known attribute from another node-like object.'''
        # Core infos
        self.id = node.id
        self.name = node.name
        self.coresNumber = node.coresNumber
        self.ramSize = node.ramSize
        self.speed = node.speed

        # Dynamic sys infos
        self.systemFreeRam = node.systemFreeRam
        self.systemSwapPercentage = node.systemSwapPercentage

        # Worker state
        self.puliversion = node.puliversion
        self.commands = node.commands
        self.status = node.status
        self.host = node.host
        self.port = node.port
        # self.pools = node.pools
        self.caracteristics = node.caracteristics
        self.performance = node.performance
        self.excluded = node.excluded

        # Timers
        self.createDate = node.createDate
        self.registerDate = node.registerDate
        self.lastAliveTime = node.lastAliveTime

    def _refresh(self):
        '''Reload this node's state from the server (not implemented yet).'''
        raise NotImplementedError
        # url = "/rendernodes/%s:%s/" % (self.host, self.port)
        #
        # try:
        #     rnDict = Server.get(url)
        #     for key, val in rnDict.iteritems():
        #         if hasattr(self, key):
        #             setattr(self, key, val)
        # except (RequestTimeoutError, RequestError):
        #     logging.error("Impossible to refresh rendernode with query: \
        #         %s" % url)

    def _sendPauseCommand(self, content):
        '''
        POST the given payload to this node's /pause/ endpoint.

        :param content: dict payload, e.g. {'content': "-1"}
        :return: A boolean indicating if the action has been properly executed
        '''
        url = "/pause/"
        body = json.dumps(content)
        try:
            # No data awaited from request, an exception is raised
            # if pause action could not be executed
            request(self.host, self.port, url, "post", data=body)
        except (RequestTimeoutError, RequestError):
            logging.error("Impossible to send proper pause action to node %s "
                          "with content: %s" % (url, content))
            return False
        return True

    #
    # User actions
    #
    def resume(self):
        '''
        Send command to render node to exit from pause
        :return: A boolean indicating if the action has been properly executed
        '''
        return self._sendPauseCommand({'content': "0"})

    def pause(self):
        '''
        | Send command to current RN to kill running command and pause
        | NOTE: status will be effective after a short delay (approx. 50ms)
        :return: A boolean indicating if the action succeeded
        '''
        return self._sendPauseCommand({'content': "-1"})

    def killAndRestart(self):
        '''
        | Send command to kill command on a RN and restart it
        | NOTE: status will be effective after a short delay (approx. 50ms)
        :return: A boolean indicating if the action has been properly executed
        '''
        return self._sendPauseCommand({'content': "-3"})

    def restart(self):
        '''
        | Send command to restart current RN
        | NOTE: status will be effective after delay (approx. 50ms)
        :return: A boolean indicating if the action has been properly executed
        '''
        return self._sendPauseCommand({'content': "-2"})

    def getLog(self):
        '''
        Return a string containing the worker log.
        '''
        raise NotImplementedError

    def tailLog(self, length=100):
        '''
        Return a string containing the tail of the worker log.
        :param length: int indicating the number of lines to retrieve
        '''
        raise NotImplementedError

    def setPerformanceIndex(self):
        '''Update this node's performance index (not implemented yet).'''
        raise NotImplementedError
|
{
"content_hash": "2441372c3189c5079d884940e7d2c13e",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 86,
"avg_line_length": 29.298969072164947,
"alnum_prop": 0.5816326530612245,
"repo_name": "smaragden/OpenRenderManagement",
"id": "a701d3be9befdd49769f4d06f99d77eb8a026f8a",
"size": "5707",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/puliclient/model/renderNode.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "889392"
},
{
"name": "Shell",
"bytes": "5347"
}
],
"symlink_target": ""
}
|
__author__ = 'Mark McBride'
from . import Command
import os
import shutil
import subprocess
import traceback
from copy import copy
from twitter.common.collections import OrderedSet
from twitter.pants import is_jvm
from twitter.pants.base import Address, Target
from twitter.pants.targets import JavaLibrary
from twitter.pants.ant import AntBuilder, bang
class IvyResolve(Command):
    """Resolves ivy dependencies to a local directory, obviating the need for
    an explicit resolve per build."""

    __command__ = 'ivy_resolve'

    @staticmethod
    def _is_resolvable(target):
        """Return True if the target's dependencies can be ivy-resolved (jvm targets only)."""
        return is_jvm(target)

    def setup_parser(self, parser, args):
        """Declare usage string and command line options."""
        parser.set_usage("%prog ivy_resolve ([spec]...)")
        parser.add_option("--clean", action="store_true", dest="clean", default=False,
                          help="removes local libs directories")
        parser.add_option("--intransitive", action="store_true", dest="intransitive", default=False,
                          help="only resolve dependencies for given spec")
        parser.epilog = """Links ivy libs to a local directory, obviating the need for an explicit ivy resolve"""

    def __init__(self, root_dir, parser, argv):
        Command.__init__(self, root_dir, parser, argv)

        self.clean = self.options.clean
        self.intransitive = self.options.intransitive
        # TODO: def not shared with lib.py
        self.workspace_root = os.path.join(root_dir, '.pants.d')
        self.ivy_jar = os.path.join(root_dir, 'build-support', 'ivy', 'lib', 'ivy-2.2.0.jar')
        self.ivy_settings = os.path.join(root_dir, 'build-support', 'ivy', 'ivysettings.xml')

        if self.args:
            self.targets = self._parse_targets(OrderedSet(), root_dir)
        else:
            # No specs on the command line: resolve every resolvable target
            # found in the repository.
            def get_targets():
                for address in Command.scan_addresses(root_dir):
                    target = Target.get(address)
                    if IvyResolve._is_resolvable(target):
                        yield target
            self.targets = list(get_targets())

    def _parse_targets(self, targets, root_dir):
        """Resolve command line specs into targets and (unless --intransitive)
        their internal dependencies; returns the populated `targets` set."""
        for spec in self.args:
            try:
                address = Address.parse(root_dir, spec)
            except Exception:
                # FIX: narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                self.error("Problem parsing spec %s: %s" % (spec, traceback.format_exc()))

            try:
                target = Target.get(address)
            except Exception:
                self.error("Problem parsing target %s: %s" % (address, traceback.format_exc()))

            if address.is_meta:
                print("target is meta")
                target = target.do_in_context(lambda: bang.extract_target([target], None))

            if not IvyResolve._is_resolvable(target):
                self.error("Target: %s is not resolvable" % address)
            targets.add(target)

            if not self.intransitive:
                # Walk the dependency graph and pull in every resolvable
                # internal dependency as well.
                def add_targets(ttarget):
                    if hasattr(ttarget, 'internal_dependencies'):
                        for dep in ttarget.internal_dependencies:
                            if IvyResolve._is_resolvable(dep):
                                targets.add(dep)
                            else:
                                print("skipping %s as it's not ivy resolvable" % dep.name)
                target.walk(add_targets)
        return targets

    def execute(self):
        """Generate an ivy.xml per target and retrieve its dependencies
        (or just clean the libs directories when --clean was given)."""
        for target in self.targets:
            print("creating ivyxml for " + target.name)
            ivyxml = self.create_ivyxml(target)
            libs_dir = os.path.join(os.path.dirname(ivyxml), 'libs')
            print("cleaning " + libs_dir)
            if os.path.exists(libs_dir):
                shutil.rmtree(libs_dir)
            if not self.clean:
                self.build_target_dir_fileset(target, ivyxml)
                for configuration in ['default', 'test']:
                    self.build_libs_dir(target, ivyxml, configuration)

    def build_target_dir_fileset(self, target, ivyxml):
        """Write dependency_target_dirs.txt listing the jvm output dir of each
        target in this target's dependency closure."""
        print("writing target_dir fileset for " + target.name)
        target_dirs = OrderedSet()

        def add_targets(ttarget):
            target_dirs.add(ttarget._create_template_data().id)
        target.walk(add_targets)

        target_dirs_file_name = os.path.join(os.path.dirname(ivyxml), "dependency_target_dirs.txt")
        # FIX: the original ended with `target_dirs_file.close` (attribute
        # access, never called), leaking the file handle; a with-block
        # guarantees the file is closed even on error.
        with open(target_dirs_file_name, "w") as target_dirs_file:
            for target_dir in target_dirs:
                target_dirs_file.write(target_dir + "/jvm\n")

    def create_ivyxml(self, target):
        """Generate the ant/ivy build files for `target`; return the ivy.xml path."""
        builder = AntBuilder(self.error, self.workspace_root, False, False)
        buildxml, ivyxml = builder.create_ant_builds(self.workspace_root, dict(), set(), target)
        return ivyxml

    def build_libs_dir(self, target, ivyxml, conf):
        """Invoke ivy to retrieve the target's jar dependencies for one
        configuration into <ivyxml dir>/libs/<conf>."""
        all_deps = OrderedSet()
        all_sources = ['dummy']

        def extract_jars(ttarget):
            for jar_dep in ttarget.jar_dependencies:
                if jar_dep.rev:
                    all_deps.add(copy(jar_dep))
        target.walk(extract_jars)

        # Wrap all collected jar deps into one synthetic library so a single
        # ivy resolve covers the whole closure.
        def create_meta_target():
            return JavaLibrary(target.name + '.deps',
                               all_sources,
                               dependencies=all_deps,
                               is_meta=True)
        meta_target = target.do_in_context(create_meta_target)

        local_ivy = os.path.abspath(ivyxml) + ".local"
        AntBuilder.generate_ivy(self.workspace_root, local_ivy, meta_target)

        libs_dir = os.path.join(os.path.dirname(os.path.abspath(ivyxml)), 'libs', conf)
        if not os.path.exists(libs_dir):
            os.makedirs(libs_dir)

        # FIX: dropped the unused `classpath_result` binding.
        subprocess.call([
            'java',
            '-jar', self.ivy_jar,
            '-settings', self.ivy_settings,
            '-ivy', local_ivy,
            '-confs', conf,
            '-retrieve',
            "%s/[artifact]-[revision].[ext]" % libs_dir,
            "-symlink",
            "-sync"])
|
{
"content_hash": "5b3341d9bdbffe0e658d4c01ff2cafa4",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 109,
"avg_line_length": 35.324503311258276,
"alnum_prop": 0.6370453693288339,
"repo_name": "foursquare/commons-old",
"id": "67c1c70afc4e9c0b781ebf03b6a222460804bc7a",
"size": "6235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/twitter/pants/commands/ivy_resolve.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "2164475"
},
{
"name": "Python",
"bytes": "1285839"
},
{
"name": "Scala",
"bytes": "24999"
},
{
"name": "Shell",
"bytes": "6233"
},
{
"name": "Smalltalk",
"bytes": "10614"
}
],
"symlink_target": ""
}
|
from flask_wtf import Form
from wtforms import TextField
from wtforms.validators import DataRequired
class TodoForm(Form):
    """WTForms form with a single required text field for entering a todo item."""
    # 'todo' is rejected as invalid when submitted empty (DataRequired).
    todo = TextField('todo', validators=[DataRequired()])
|
{
"content_hash": "d6570dddb9d5da721077e6b61bcd6cb1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 57,
"avg_line_length": 26.142857142857142,
"alnum_prop": 0.7814207650273224,
"repo_name": "stanzheng/tastemvc",
"id": "8a6276d74521184f9510d7ccc401b6818d8c88f1",
"size": "183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask/app/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "374"
},
{
"name": "Python",
"bytes": "4666"
},
{
"name": "Shell",
"bytes": "52"
}
],
"symlink_target": ""
}
|
from discord.ext import commands
import discord
class Help:
    """Cog that replaces discord.py's built-in help command with a custom embed."""

    def __init__(self, bot):
        self.bot = bot
        # Drop the default help command so our override takes its place.
        self.bot.remove_command('help')

    @commands.command(pass_context=True)
    async def help(self, ctx):
        """DM the invoking user an embed listing the bot's commands."""
        embed = discord.Embed(colour=0x933FCA)
        embed.set_author(
            name=self.bot.user.name + ' | Help',
            icon_url='http://img.ctrlv.in/img/17/05/14/591802436f0db.png')

        # (name, value, inline) triples, added in display order.
        field_specs = [
            ('\u2063', '**Music:**', False),
            ('Information',
             'Displays user data\n'
             'Displays rank data\n'
             'Displays server data\n',
             True),
            ('Command',
             '!userinfo <user>\n'
             '!rankinfo <rank>\n'
             '!serverinfo\n',
             True),
            ('\u2063', '**Administration:** *(Admins Only)*', False),
            ('Description',
             'Kick a user\n'
             'Ban a user\n'
             'Stab a user\n'
             'Kill a user...\n'
             'Revives a user from the dead\n'
             'Clears x amount of messages\n',
             True),
            ('Command',
             '!kick <user> [message]\n'
             '!ban <user> [message]\n'
             '!stab <user>\n'
             '!kill <user>\n'
             '!revive <user>\n'
             '!clear <amount> [user]\n',
             True),
        ]
        for field_name, field_value, field_inline in field_specs:
            embed.add_field(name=field_name, value=field_value, inline=field_inline)

        await self.bot.send_message(ctx.message.author, embed=embed)
def setup(bot):
    """Extension entry point used by discord.py's extension loader."""
    bot.add_cog(Help(bot))
|
{
"content_hash": "7dc2760f5670f87ec828001da379ad8e",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 124,
"avg_line_length": 32.723404255319146,
"alnum_prop": 0.5325097529258778,
"repo_name": "Mine15029/Magikal-Wazard-Bots",
"id": "fff9f0291555f474ac9fac946f8c8bf25207b222",
"size": "1538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/help_glados.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "52970"
}
],
"symlink_target": ""
}
|
from rosbridge_library.capability import Capability
from datetime import datetime
import threading
class ReceivedFragments():
    """
    Singleton class to hold lists of received fragments in one 'global' object
    """

    class __impl:
        """ Implementation of the singleton interface """

        def spam(self):
            """ Test method, return singleton id """
            return id(self)

    # Shared implementation object (name-mangled to
    # _ReceivedFragments__instance); created on first instantiation.
    __instance = None

    # List of defragmentation instances
    # Format:
    # {
    #   <<message1_ID>> : {
    #       "timestamp_last_append" : <<datetime-object>>,
    #       "total" : <<total_fragments>>,
    #       "fragment_list" : {
    #           <<fragment1ID>>: <<fragment1_data>>,
    #           <<fragment2ID>>: <<fragment2_data>>,
    #           ...
    #       }
    #   },
    #   ...
    # NOTE(review): this class-level dict is shared by every instance, which
    # is what makes the fragment map effectively global.
    lists = {}

    def __init__(self):
        """ Create singleton instance """
        # Only the first instantiation creates the implementation object;
        # later calls reuse it.
        if ReceivedFragments.__instance is None:
            ReceivedFragments.__instance = ReceivedFragments.__impl()
            self.lists = {}
        # Store the instance reference directly in __dict__ to bypass the
        # __setattr__ delegation below.
        self.__dict__['_ReceivedFragments__instance'] = ReceivedFragments.__instance

    def __getattr__(self, attr):
        """ Delegate access to implementation """
        # Only called for attributes not found through normal lookup.
        return getattr(self.__instance, attr)

    def __setattr__(self, attr, value):
        """ Delegate access to implementation """
        return setattr(self.__instance, attr, value)
class Defragment(Capability, threading.Thread):
    """Capability that reassembles rosbridge messages sent as fragments.

    Incoming messages with op-code "fragment" are collected per message id;
    once all announced fragments have arrived, the original message is
    reconstructed and handed back to the protocol.
    """

    # Seconds after which an incomplete fragment list is discarded.
    fragment_timeout = 600
    opcode = "fragment"
    protocol = None

    def __init__(self, protocol):
        Capability.__init__(self, protocol)
        self.protocol = protocol

        # populate parameters
        if self.protocol.parameters is not None:
            self.fragment_timeout = self.protocol.parameters["fragment_timeout"]

        protocol.register_operation(self.opcode, self.defragment)
        # Singleton-backed shared map: message id -> fragment bookkeeping.
        self.received_fragments = ReceivedFragments().lists

        threading.Thread.__init__(self)

    # defragment() does:
    # 1) take any incoming message with op-code "fragment"
    # 2) check all existing fragment lists for time out
    #    2.a) remove timed out lists (only if new fragment is not for this list)
    # 3) create a new fragment list for new message ids
    #    3.a) check message fields
    #    3.b) append the new fragment to 'the' list
    #    3.c) add time stamp (last_fragment_appended) to 'this' list
    # 4) check if the list of current fragment (message id) is complete
    #    4.a) reconstruct the original message by concatenating the fragments
    #    4.b) pass the reconstructed message string to protocol.incoming()
    #    4.c) remove the fragment list to free up memory
    def defragment(self, message):
        """Collect one fragment; reconstruct and forward once complete."""
        now = datetime.now()

        # 2) Expire fragment lists that have not been appended to recently.
        if self.received_fragments is not None:
            # FIX: iterate over a snapshot (list(...)) so entries can be
            # deleted during the loop; the original iterated .keys() directly,
            # which raises RuntimeError on python 3 when deleting. Also
            # renamed the loop variable, which shadowed the builtin id().
            for frag_id in list(self.received_fragments.keys()):
                time_diff = now - self.received_fragments[frag_id]["timestamp_last_append"]
                if (time_diff.total_seconds() > self.fragment_timeout and
                        not self.received_fragments[frag_id]["is_reconstructing"]):
                    log_msg = ["fragment list ", str(frag_id), " timed out.."]
                    if message["id"] != frag_id:
                        log_msg.append(" -> removing it..")
                        del self.received_fragments[frag_id]
                    else:
                        log_msg.extend([" -> but we're just about to add fragment #"])
                        log_msg.extend([str(message.get("num")), " of "])
                        log_msg.extend([str(self.received_fragments[message.get("id")]["total"])])
                        log_msg.extend([" ..keeping the list"])
                    self.protocol.log("warning", ''.join(log_msg))

        msg_opcode = message.get("op")
        msg_id = message.get("id")
        msg_num = message.get("num")
        msg_total = message.get("total")
        msg_data = message.get("data")

        # 3.a) Abort if any message field is missing.
        if (msg_opcode is None or msg_id is None or
                msg_num is None or msg_total is None or
                msg_data is None):
            self.protocol.log("error", "received invalid fragment!")
            return

        log_msg = "fragment for messageID: " + str(msg_id) + " received."
        self.protocol.log("debug", log_msg)

        # 3) Create fragment container if none exists yet.
        if msg_id not in self.received_fragments:
            self.received_fragments[msg_id] = {
                "is_reconstructing": False,
                "total": message["total"],
                "timestamp_last_append": now,
                "fragment_list": {}
            }
            log_msg = "opened new fragment list for messageID " + str(msg_id)
            self.protocol.log("debug", log_msg)

        frag_entry = self.received_fragments[msg_id]

        # 3.b/3.c) Add fragment if not already present and consistent with the
        # announced total; refresh the timeout stamp.
        if (msg_num not in frag_entry["fragment_list"] and
                msg_num <= frag_entry["total"] and
                msg_total == frag_entry["total"]):
            frag_entry["fragment_list"][msg_num] = msg_data
            frag_entry["timestamp_last_append"] = now
            log_msg = ["appended fragment #" + str(msg_num)]
            log_msg.extend([" (total: ", str(msg_total), ") to fragment list for messageID ", str(msg_id)])
            self.protocol.log("debug", ''.join(log_msg))
        else:
            log_msg = "error while trying to append fragment " + str(msg_num)
            self.protocol.log("error", log_msg)
            return

        received_all_fragments = False
        existing_fragments = len(frag_entry["fragment_list"])
        announced_total = frag_entry["total"]

        # 4) Make sure total number of fragments received.
        if existing_fragments == announced_total:
            log_msg = ["enough/all fragments for messageID " + str(msg_id) + " received"]
            log_msg.extend([" [", str(existing_fragments), "]"])
            self.protocol.log("debug", ''.join(log_msg))

            # Check each fragment index 0..total-1 is present.
            received_all_fragments = True
            for i in range(0, announced_total):
                if i not in frag_entry["fragment_list"]:
                    received_all_fragments = False
                    log_msg = "fragment #" + str(i)
                    log_msg += " for messageID " + str(msg_id) + " is missing! "
                    self.protocol.log("error", log_msg)

        frag_entry["is_reconstructing"] = received_all_fragments

        if received_all_fragments:
            log_msg = "reconstructing original message " + str(msg_id)
            self.protocol.log("debug", log_msg)

            # 4.a) FIX: fragment_list is a dict keyed by fragment number, so
            # the original slice expression fragment_list[0:total] raised
            # TypeError (dicts are not sliceable); join fragments in index
            # order instead.
            reconstructed_msg = ''.join(
                frag_entry["fragment_list"][i] for i in range(announced_total))

            log_msg = ["reconstructed original message:\n"]
            log_msg.append(reconstructed_msg)
            self.protocol.log("debug", ''.join(log_msg))

            duration = datetime.now() - now

            # 4.b) Pass the reconstructed message to rosbridge.
            self.protocol.incoming(reconstructed_msg)

            log_msg = ["reconstructed message (ID:" + str(msg_id) + ") from "]
            log_msg.extend([str(msg_total), " fragments. "])
            # cannot access msg.data if message is a service_response or else!
            log_msg.extend(["[duration: ", str(duration.total_seconds()), " s]"])
            self.protocol.log("info", ''.join(log_msg))

            # 4.c) Remove fragmentation container to free memory.
            del self.received_fragments[msg_id]
            log_msg = "removed fragment list for messageID " + str(msg_id)
            self.protocol.log("debug", log_msg)

    def finish(self):
        """Detach from the protocol and drop all fragment state."""
        self.received_fragments = None
        self.protocol.unregister_operation("fragment")
|
{
"content_hash": "5201b4b2f91be2cb254ae2e0d28fe753",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 212,
"avg_line_length": 44.192893401015226,
"alnum_prop": 0.5746611532276591,
"repo_name": "SNU-Sigma/rosbridge_suite",
"id": "29ef1a6e9db2a410b3e2579a6d145ffc1d3ea432",
"size": "8706",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "rosbridge_library/src/rosbridge_library/capabilities/defragmentation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1070"
},
{
"name": "CMake",
"bytes": "2773"
},
{
"name": "HTML",
"bytes": "25"
},
{
"name": "Python",
"bytes": "1508982"
},
{
"name": "Shell",
"bytes": "379"
}
],
"symlink_target": ""
}
|
import rest_framework.views
import rest_framework.response
from fjord.base.utils import smart_str
from fjord.events.models import get_product_details_history
class EventAPI(rest_framework.views.APIView):
    """Read-only endpoint returning product-details history events, optionally
    filtered by product list and date range."""

    def get(self, request):
        """Handle GET with optional query params: products (csv),
        date_start and date_end (inclusive bounds)."""
        matched = get_product_details_history()

        raw_products = smart_str(request.GET.get('products'))
        start = smart_str(request.GET.get('date_start'))
        end = smart_str(request.GET.get('date_end'))

        product_list = raw_products
        if raw_products:
            product_list = [name.strip() for name in raw_products.split(',')]
            matched = [ev for ev in matched
                       if ev['product'] in product_list]
        if start:
            matched = [ev for ev in matched
                       if ev['date'] >= start]
        if end:
            matched = [ev for ev in matched
                       if ev['date'] <= end]

        payload = {
            'date_start': start if start else None,
            'date_end': end if end else None,
            'products': ','.join(product_list) if product_list else None,
            'count': len(matched),
            'events': list(matched)
        }
        return rest_framework.response.Response(payload)

    def get_throttles(self):
        # FIXME: At some point we should throttle use of this. We
        # should probably do that as soon as it's hitting the db.
        return []
|
{
"content_hash": "479622fba3e5513dd01e1c0bf51277fc",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 69,
"avg_line_length": 34.975,
"alnum_prop": 0.5889921372408864,
"repo_name": "Ritsyy/fjord",
"id": "ffdafb1d3887f765ca149205b608af1db1367281",
"size": "1399",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "fjord/events/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "158694"
},
{
"name": "HTML",
"bytes": "128135"
},
{
"name": "JavaScript",
"bytes": "302359"
},
{
"name": "Python",
"bytes": "884131"
},
{
"name": "Shell",
"bytes": "11743"
},
{
"name": "Smarty",
"bytes": "825"
}
],
"symlink_target": ""
}
|
"""plot metrics with matplotlib"""
import os.path
import shutil
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from ._numpy import NumpyMetric
__all__ = ['PlotMetric']
class PlotMetric(NumpyMetric):
    """Plot graphs of metrics. See :class:`NumpyMetric <._numpy.NumpyMetric>` for usage.

    :cvar outdir: directory to save plots in. Defaults to ``./instrument_plots``.
    """

    # Per-class registry required by the NumpyMetric machinery.
    instances = {}
    outdir = os.path.abspath("instrument_plots")

    @classmethod
    def _pre_dump(cls):
        """Output all recorded stats"""
        # Start from a clean directory so plots from earlier runs don't linger.
        shutil.rmtree(cls.outdir, ignore_errors=True)
        os.makedirs(cls.outdir)
        super(PlotMetric, cls)._pre_dump()

    def _cleanup(self):
        # Release matplotlib figure state before delegating cleanup.
        plt.clf()
        plt.close()
        super(PlotMetric, self)._cleanup()

    def _output(self):
        # One tall figure with three stacked subplots: count histogram,
        # elapsed histogram, and count-vs-elapsed scatter. Saved as
        # <outdir>/<metric name>.png.
        plt.figure(1, figsize = (8, 18))
        plt.subplot(3, 1, 1)
        self._histogram('count', self.count_mean, self.count_std, self.count_arr)
        plt.subplot(3, 1, 2)
        self._histogram('elapsed', self.elapsed_mean, self.elapsed_std, self.elapsed_arr)
        plt.subplot(3, 1, 3)
        self._scatter()
        plt.savefig(os.path.join(self.outdir, ".".join((self.name, 'png'))),
                    bbox_inches="tight")
        super(PlotMetric, self)._output()

    def _histogram(self, which, mu, sigma, data):
        """plot a histogram. For internal use only

        :param which: 'count' or 'elapsed' (selects title/axis labels)
        :param mu: mean shown in the title
        :param sigma: standard deviation shown in the title
        :param data: array of samples to bin
        """
        weights = np.ones_like(data)/len(data) # make bar heights sum to 100%
        n, bins, patches = plt.hist(data, bins=25, weights=weights, facecolor='blue', alpha=0.5)
        plt.title(r'%s %s: $\mu=%.2f$, $\sigma=%.2f$' % (self.name, which.capitalize(), mu, sigma))
        plt.xlabel('Items' if which == 'count' else 'Seconds')
        plt.ylabel('Frequency')
        # Render y tick labels as percentages (0.25 -> "25.0%").
        plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda y, position: "{:.1f}%".format(y*100)))

    def _scatter(self):
        """plot a scatter plot of count vs. elapsed. For internal use only"""
        plt.scatter(self.count_arr, self.elapsed_arr)
        plt.title('{}: Count vs. Elapsed'.format(self.name))
        plt.xlabel('Items')
        plt.ylabel('Seconds')
|
{
"content_hash": "8973ff053813dbaabdd032d0b199ba41",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 103,
"avg_line_length": 33.1764705882353,
"alnum_prop": 0.6205673758865248,
"repo_name": "wearpants/measure_it",
"id": "b40e842775e838d2dd3b29cf00e1cf5d19eb008c",
"size": "2256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "instrument/output/plot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "28355"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from django.views.generic import TemplateView, View
from django.http import HttpResponse
from django.views.decorators.cache import cache_page
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from common.diagrams_gpcr import DrawSnakePlot
from common.definitions import AA_PROPENSITY, HYDROPHOBICITY
from common.views import AbsTargetSelection
from common.definitions import AMINO_ACIDS, AMINO_ACID_GROUPS, STRUCTURAL_RULES
from construct.models import *
from construct.functions import *
from construct.tool import *
from protein.models import Protein, ProteinConformation, ProteinSegment
from structure.models import Structure
from mutation.models import Mutation
from residue.models import ResiduePositionSet
from datetime import datetime
import time
import json
import copy
import re
from collections import OrderedDict
Alignment = getattr(__import__('common.alignment_' + settings.SITE_NAME, fromlist=['Alignment']), 'Alignment')
# Create your views here.
#@cache_page(60 * 60 * 24)
def detail(request, slug):
    """Render the detail page for the construct named *slug*."""
    # Fetch the construct, deferring the heavy cached blob columns.
    construct = Construct.objects.defer('schematics', 'snakecache').get(name=slug)

    # Residues of the parent protein, keyed by sequence number so the
    # template can look them up per position.
    residue_qs = Residue.objects.filter(
        protein_conformation__protein=construct.protein
    ).order_by('sequence_number').prefetch_related(
        'protein_segment', 'generic_number', 'display_generic_number')
    residues_lookup = {res.sequence_number: res for res in residue_qs}

    schematics = construct.schematic()
    context = {
        'c': construct,
        'chunk_size': 10,
        'annotations': json.dumps(schematics['annotations']),
        'schematics': schematics,
        'residues_lookup': residues_lookup,
    }
    return render(request, 'construct/construct_detail.html', context)
class ConstructStatistics(TemplateView):
"""
Fetching construct data for browser
"""
template_name = "construct/statistics.html"
def get_context_data (self, **kwargs):
context = super(ConstructStatistics, self).get_context_data(**kwargs)
cache_temp = cache.get('construct_statistics')
# if cache_temp:
# for key, val in cache_temp.items():
# context[key] = val
# return context
cons = Construct.objects.all().defer('schematics','snakecache').order_by("protein__entry_name","crystal__pdb_code").prefetch_related(
"crystal","mutations__effects","purification","protein__family__parent__parent__parent", "insertions__insert_type", "modifications", "deletions", "crystallization__chemical_lists",
"protein__species","structure__pdb_code","structure__publication__web_link", "contributor",
"structure__protein_conformation__protein__parent", "structure__state")
#PREPARE DATA
proteins_ids = Construct.objects.all().values_list('protein', flat = True)
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins_ids).filter(residue__generic_number__label__in=['1x50','8x50','5x50','6x50','3x50','4x50']).values_list('protein__entry_name','residue__sequence_number','residue__generic_number__label')
x50s = {}
for pc in pconfs:
if pc[0] not in x50s:
x50s[pc[0]] = {}
x50s[pc[0]][pc[2]] = pc[1]
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins_ids).filter(residue__protein_segment__slug__in=['TM3','TM4','TM5','TM6']).values('protein__entry_name','residue__protein_segment__slug').annotate(start=Min('residue__sequence_number'),GN=Max('residue__generic_number__label'),GN2=Min('residue__generic_number__label'),end=Max('residue__sequence_number'))
# print(pconfs)
# x50s = {}
track_anamalities = {}
for pc in pconfs:
#print(pc)
entry_name = pc['protein__entry_name']
helix = pc['residue__protein_segment__slug'][-1]
if entry_name not in track_anamalities:
track_anamalities[entry_name] = {}
if helix not in track_anamalities[entry_name]:
track_anamalities[entry_name][helix] = [0,0]
x50 = x50s[entry_name][helix+"x50"]
gn_start = int(pc['GN2'][-2:])
gn_end = int(pc['GN'][-2:])
seq_start = pc['start']
seq_end = pc['end']
seq_range_start = x50-seq_start
seq_range_end = seq_end-x50
gn_range_start = 50-gn_start
gn_range_end = gn_end-50
if seq_range_start!=gn_range_start:
# print(entry_name,"Helix",helix, "has anamolity in start",gn_range_start-seq_range_start)
track_anamalities[entry_name][helix][0] = gn_range_start-seq_range_start
if seq_range_end!=gn_range_end:
# print(entry_name,"Helix",helix, "has anamolity in end",gn_range_end-seq_range_end)
track_anamalities[entry_name][helix][1] = gn_range_end-seq_range_end
#print(pc,helix,x50,gn_start,gn_end,seq_start,seq_end,,x50-seq_start,50-gn_start,gn_end-50)
#print(x50s[entry_name])
# if pc[0] not in x50s:
# x50s[pc[0]] = {}
# x50s[pc[0]][pc[2]] = pc[1]
# print(track_anamalities)
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins_ids).prefetch_related('protein').filter(residue__protein_segment__slug='TM1').annotate(start=Min('residue__sequence_number'))
#pconfs = ProteinConformation.objects.filter(protein_id__in=proteins).filter(residue__generic_number__label__in=['1x50']).values_list('protein__entry_name','residue__sequence_number','residue__generic_number__label')
tm1_start = {}
for pc in pconfs:
tm1_start[pc.protein.entry_name] = pc.start
pconfs = ProteinConformation.objects.filter(protein_id__in=proteins_ids).prefetch_related('protein').filter(residue__protein_segment__slug='C-term').annotate(start=Min('residue__sequence_number'),end=Max('residue__sequence_number'))
cterm_start = {}
cterm_end = {}
for pc in pconfs:
cterm_start[pc.protein.entry_name] = pc.start
cterm_end[pc.protein.entry_name] = pc.end
#GRAB RESIDUES for mutations
mutations = []
positions = []
proteins = []
full_p_name = {}
for c in cons:
p = c.protein
entry_name = p.entry_name
full_p_name[entry_name] = p.name.replace('receptor','').replace('-adrenoceptor','')
p_class = p.family.slug.split('_')[0]
pdb = c.crystal.pdb_code
pdb = '' # do not count same mutation many times
for mutation in c.mutations.all():
skip = True
for effect in mutation.effects.all():
if effect.slug == 'thermostabilising':
skip = False
if skip:
continue
if p.entry_name not in proteins:
proteins.append(entry_name)
mutations.append((mutation,entry_name,pdb,p_class))
if mutation.sequence_number not in positions:
positions.append(mutation.sequence_number)
rs = Residue.objects.filter(protein_conformation__protein__entry_name__in=proteins, sequence_number__in=positions).prefetch_related('generic_number','protein_conformation__protein','annotations__data_type')
rs_lookup = {}
gns = []
for r in rs:
if not r.generic_number: #skip non gn
continue
entry_name = r.protein_conformation.protein.entry_name
pos = r.sequence_number
# segment = r.protein_segment.slug
if entry_name not in rs_lookup:
rs_lookup[entry_name] = {}
if pos not in rs_lookup[entry_name]:
rs_lookup[entry_name][pos] = r
rs = Residue.objects.filter(protein_conformation__protein__id__in=proteins_ids, protein_segment__slug__in=['N-term','C-term'],annotations__data_type__slug='dynamine').prefetch_related('generic_number','protein_segment','protein_conformation__protein','annotations__data_type')
rs_annotations = {}
for r in rs:
entry_name = r.protein_conformation.protein.entry_name
pos = r.sequence_number
segment = r.protein_segment.slug
if entry_name not in rs_annotations:
rs_annotations[entry_name] = {}
if segment not in rs_annotations[entry_name]:
rs_annotations[entry_name][segment] = {}
if pos not in rs_annotations[entry_name][segment]:
try:
rs_annotations[entry_name][segment][pos] = r.annotations.all()[0].value
except:
print('no dynamine for ',entry_name,pos,r.pk)
# print(rs_annotations)
truncations = {}
truncations_new = {}
truncations['nterm'] = {}
truncations['nterm_fusion'] = {}
truncations_new['nterm'] = OrderedDict()
truncations_new['cterm'] = OrderedDict()
truncations_new['nterm_fusion'] = OrderedDict()
truncations_new['icl3_fusion'] = OrderedDict()
truncations_new['icl2_fusion'] = OrderedDict()
truncations_new['icl3_start'] = OrderedDict()
truncations_new['icl3_end'] = OrderedDict()
truncations_new['icl3_fusion_start'] = OrderedDict()
truncations_new['icl3_fusion_end'] = OrderedDict()
truncations_new['icl2_fusion_start'] = OrderedDict()
truncations_new['icl2_fusion_end'] = OrderedDict()
track_fusions = OrderedDict()
track_fusions2 = OrderedDict()
track_without_fusions = OrderedDict()
truncations_new_possibilties = {}
truncations_maximums = {}
truncations_new_sum = {}
truncations['cterm'] = {}
truncations['icl3'] = {}
truncations['icl3_fusion'] = {}
truncations['icl2'] = {}
truncations['icl2_fusion'] = {}
class_names = {}
states = {}
linkers_exist_before = {}
linkers_exist_after = {}
fusion_by_pdb = {}
fusions_short = {
'Flavodoxin': 'Flav',
'T4 Lysozyme (T4L)': 'T4L',
'Rubredoxin': 'Rubr',
'PGS (Pyrococcus abyssi glycogen synthase)': 'PGS',
'BRIL (b562RIL)': 'BRIL',
'mT4L' : 'mT4L',
'OB1' : 'OB1',
'3A Arrestin': 'Arr'
}
for c in cons:
p = c.protein
entry_name = p.entry_name
pdb_code = c.crystal.pdb_code
entry_name_pdb = entry_name+ "_"+ pdb_code
state = c.structure.state.slug
entry_name_pdb_state = entry_name+ "_"+ pdb_code + "_" +state
crystal_p = c.structure.protein_conformation.protein.parent.entry_name
if entry_name!=crystal_p:
print("ERROR ERROR ERROR",pdb_code,entry_name,crystal_p)
c.protein = c.structure.protein_conformation.protein.parent
c.save()
#print(c.structure.state.slug)
p_class = p.family.slug.split('_')[0]
if p_class not in class_names:
class_names[p_class] = re.sub(r'\([^)]*\)', '', p.family.parent.parent.parent.name)
p_class_name = class_names[p_class].strip()
states[pdb_code] = state
# if state=='active':
# p_class_name += "_active"
# if state=='intermediate':
# p_class_name += "_interm"
fusion_n = False
fusion_icl3 = False
fusion_position, fusions, linkers = c.fusion()
found_nterm = False
found_cterm = False
if p_class_name not in track_fusions:
track_fusions[p_class_name] = OrderedDict()
# print(entry_name_pdb,fusions)
if fusions:
if entry_name_pdb not in track_fusions[p_class_name]:
track_fusions[p_class_name][entry_name_pdb] = {'found':[],'for_print':[], '3_4_length':[], '5_6_length':[], '3_4_deleted':[], '5_6_deleted':[]}
if fusions:
fusion_name = fusions[0][2]
fusion_by_pdb[pdb_code] = fusions_short[fusion_name]
if fusion_name not in track_fusions2:
track_fusions2[fusion_name] = {'found':[],'for_print':[]}
# if entry_name=='aa2ar_human':
# print(state,p_class_name)
for deletion in c.deletions.all():
# if entry_name=='aa2ar_human':
# print(entry_name,deletion.start,cterm_start[entry_name],c.name) # lpar1_human
if deletion.end <= x50s[entry_name]['1x50']:
found_nterm = True
bw = "1."+str(50-x50s[entry_name]['1x50']+deletion.end)
#bw = bw + " " + str(x50s[entry_name]['1x50']-deletion.end)
from_tm1 = tm1_start[entry_name] - deletion.end-1
if entry_name=='agtr1_human' and pdb_code=='4YAY':
# print(from_tm1,entry_name,c.name,fusion_position)
# This is due to odd situation with 4YAY where they deleted in the middle.
from_tm1 = 14
if pdb_code=='4ZUD':
from_tm1 = 9
position = 'nterm'
if fusion_position=='nterm' or fusion_position=='nterm_icl3':
position = 'nterm_fusion'
if from_tm1 not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(from_tm1)
if from_tm1 not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(from_tm1)
if p_class_name not in truncations[position]:
truncations[position][p_class_name] = {}
if bw not in truncations[position][p_class_name]:
truncations[position][p_class_name][bw] = []
if entry_name_pdb not in truncations[position][p_class_name][bw]:
truncations[position][p_class_name][bw].append(entry_name_pdb)
if position not in truncations_new_possibilties:
truncations_new_possibilties[position] = []
if position not in truncations_maximums:
truncations_maximums[position] = {}
if p_class_name not in truncations_maximums[position]:
truncations_maximums[position][p_class_name] = 0
if from_tm1 not in truncations_new_possibilties[position]:
truncations_new_possibilties[position].append(from_tm1)
truncations_new_possibilties[position] = sorted(truncations_new_possibilties[position])
if tm1_start[entry_name]-1 > truncations_maximums[position][p_class_name]:
truncations_maximums[position][p_class_name] = tm1_start[entry_name]-1
if position not in truncations_new_sum:
truncations_new_sum[position] = {}
if p_class_name not in truncations_new_sum[position]:
truncations_new_sum[position][p_class_name] = {}
if p_class_name not in truncations_new[position]:
truncations_new[position][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb_state not in truncations_new[position][p_class_name]['receptors']:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb_state] = [[],[],[tm1_start[entry_name]-1]]
if fusion_position!='nterm' or 1==1:
if from_tm1 not in truncations_new[position][p_class_name]['receptors'][entry_name_pdb_state][0]:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb_state][0].append(from_tm1)
if from_tm1 not in truncations_new_sum[position][p_class_name]:
truncations_new_sum[position][p_class_name][from_tm1] = 0
truncations_new_sum[position][p_class_name][from_tm1] += 1
# if from_tm1 not in truncations_new[position][p_class_name]['possiblities']:
# truncations_new[position][p_class_name]['possiblities'].append(from_tm1)
# truncations_new[position][p_class_name]['possiblities'] = sorted(truncations_new[position][p_class_name]['possiblities'])
# if from_tm1==0:
# print(state,entry_name,p_class_name,truncations_new[position][p_class_name]['receptors'][entry_name])
if deletion.start >= x50s[entry_name]['8x50']:
found_cterm = True
import html
bw = x50s[entry_name]['8x50']-deletion.start
bw = "8."+str(50-x50s[entry_name]['8x50']+deletion.start)
from_h8 = deletion.start - cterm_start[entry_name]
# print(p_class_name,':',html.unescape(p.family.name),':',entry_name,':',pdb_code,':',deletion.start-x50s[entry_name]['8x50'],':',from_h8)
if p_class_name not in truncations['cterm']:
truncations['cterm'][p_class_name] = {}
if bw not in truncations['cterm'][p_class_name]:
truncations['cterm'][p_class_name][bw] = []
if entry_name_pdb not in truncations['cterm'][p_class_name][bw]:
truncations['cterm'][p_class_name][bw].append(entry_name_pdb)
position = 'cterm'
if deletion.start>1000:
#TODO there are some wrong ones, can be seen by having >1000 positions which are fusion
continue
print(deletion.start,from_h8,cterm_start[entry_name],c.crystal.pdb_code )
if position not in truncations_new_possibilties:
truncations_new_possibilties[position] = []
if position not in truncations_maximums:
truncations_maximums[position] = {}
if p_class_name not in truncations_maximums[position]:
truncations_maximums[position][p_class_name] = 0
if from_h8 not in truncations_new_possibilties[position]:
truncations_new_possibilties[position].append(from_h8)
truncations_new_possibilties[position] = sorted(truncations_new_possibilties[position])
if from_h8 > truncations_maximums[position][p_class_name]:
truncations_maximums[position][p_class_name] = from_h8
if position not in truncations_new_sum:
truncations_new_sum[position] = {}
if p_class_name not in truncations_new_sum[position]:
truncations_new_sum[position][p_class_name] = {}
if p_class_name not in truncations_new[position]:
truncations_new[position][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position][p_class_name]['receptors']:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb] = [[],[],[cterm_end[entry_name]-cterm_start[entry_name]+1]]
if from_h8 not in truncations_new[position][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb][0].append(from_h8)
if from_h8 not in truncations_new_sum[position][p_class_name]:
truncations_new_sum[position][p_class_name][from_h8] = 0
truncations_new_sum[position][p_class_name][from_h8] += 1
if deletion.start > x50s[entry_name]['5x50'] and deletion.start < x50s[entry_name]['6x50']:
# if linkers['before']:
# print(entry_name,c.name,deletion.start,deletion.end,x50s[entry_name]['5x50'])
if linkers['before']:
deletion.start += len(linkers['before'])
linkers_exist_before[c.crystal.pdb_code] = len(linkers['before'])
if linkers['after']:
deletion.end -= len(linkers['after'])
linkers_exist_after[c.crystal.pdb_code] = len(linkers['after'])
# if linkers['before']:
# print(entry_name,c.name,deletion.start,deletion.end,x50s[entry_name]['5x50'])
fusion_icl3 = True
bw = x50s[entry_name]['5x50']-deletion.start-1
bw = "5x"+str(50-x50s[entry_name]['5x50']+deletion.start+track_anamalities[entry_name]['5'][1]-1)
bw_real = "5."+str(50-x50s[entry_name]['5x50']+deletion.start-1)
bw2 = "6x"+str(50-x50s[entry_name]['6x50']+deletion.end+track_anamalities[entry_name]['6'][0]+1)
bw2_real = "6."+str(50-x50s[entry_name]['6x50']+deletion.end+1)
# Make 1.50x50 number
# bw = bw_real+"x"+bw
# bw2 = bw2_real+"x"+bw2
bw_combine = bw+"-"+bw2
position = 'icl3'
del_length = 1+deletion.end-deletion.start
if bw=='5x107':
# Skip these false deletions in melga
continue
# if entry_name=='s1pr1_human':
# print("CHECK",deletion.start,deletion.end, bw,bw2)
if entry_name=='s1pr1_human' and deletion.start==250:
# Skip these false deletions in s1pr1_human (3V2W, 3V2Y)
continue
l_5_6_length = x50s[entry_name]['6x50']-x50s[entry_name]['5x50']
if fusion_position=='icl3' or fusion_position=='nterm_icl3':
position = 'icl3_fusion'
if bw not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw)
if bw2 not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw2)
# else:
# print(entry_name,c.name,fusions)
if fusion_position=='icl3' or fusion_position=='nterm_icl3':
#Track those with fusion
if bw not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw)
if bw2 not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw2)
if del_length not in track_fusions[p_class_name][entry_name_pdb]['5_6_deleted']:
track_fusions[p_class_name][entry_name_pdb]['5_6_deleted'].append(del_length)
if l_5_6_length not in track_fusions[p_class_name][entry_name_pdb]['5_6_length']:
track_fusions[p_class_name][entry_name_pdb]['5_6_length'].append(l_5_6_length)
if p_class_name not in truncations_new[position+'_start']:
truncations_new[position+'_start'][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position+'_start'][p_class_name]['receptors']:
truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb] = [[],[],[bw]]
if bw not in truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb][0].append(bw)
if p_class_name not in truncations_new[position+'_end']:
truncations_new[position+'_end'][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position+'_end'][p_class_name]['receptors']:
truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb] = [[],[],[bw2]]
if bw not in truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb][0].append(bw2)
else:
# print('ICL3 CUT WITHOUT FUSION',bw_combine,entry_name,c.name)
if p_class_name not in track_without_fusions:
track_without_fusions[p_class_name] = OrderedDict()
if entry_name_pdb not in track_without_fusions[p_class_name]:
track_without_fusions[p_class_name][entry_name_pdb] = {'found':[],'for_print':[], '3_4_length':[], '5_6_length':[], '3_4_deleted':[], '5_6_deleted':[]}
#Track those without fusion
if bw not in track_without_fusions[p_class_name][entry_name_pdb]['found']:
track_without_fusions[p_class_name][entry_name_pdb]['found'].append(bw)
if bw2 not in track_without_fusions[p_class_name][entry_name_pdb]['found']:
track_without_fusions[p_class_name][entry_name_pdb]['found'].append(bw2)
if del_length not in track_without_fusions[p_class_name][entry_name_pdb]['5_6_deleted']:
track_without_fusions[p_class_name][entry_name_pdb]['5_6_deleted'].append(del_length)
if l_5_6_length not in track_without_fusions[p_class_name][entry_name_pdb]['5_6_length']:
track_without_fusions[p_class_name][entry_name_pdb]['5_6_length'].append(l_5_6_length)
if p_class_name not in truncations_new[position+'_start']:
truncations_new[position+'_start'][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position+'_start'][p_class_name]['receptors']:
truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb] = [[],[],[bw]]
if bw not in truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb][0].append(bw)
if p_class_name not in truncations_new[position+'_end']:
truncations_new[position+'_end'][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position+'_end'][p_class_name]['receptors']:
truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb] = [[],[],[bw2]]
if bw not in truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb][0].append(bw2)
if p_class_name not in truncations[position]:
truncations[position][p_class_name] = {}
if bw_combine not in truncations[position][p_class_name]:
truncations[position][p_class_name][bw_combine] = []
if entry_name_pdb not in truncations[position][p_class_name][bw_combine]:
truncations[position][p_class_name][bw_combine].append(entry_name_pdb)
if position+"_start" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_start"] = []
if position+"_end" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_end"] = []
if bw not in truncations_new_possibilties[position+"_start"]:
truncations_new_possibilties[position+"_start"].append(bw)
truncations_new_possibilties[position+"_start"] = sorted(truncations_new_possibilties[position+"_start"])
if bw2 not in truncations_new_possibilties[position+"_end"]:
truncations_new_possibilties[position+"_end"].append(bw2)
truncations_new_possibilties[position+"_end"] = sorted(truncations_new_possibilties[position+"_end"])
if deletion.start > x50s[entry_name]['3x50'] and deletion.start < x50s[entry_name]['4x50']:
# if fusion_icl3:
# print(entry_name,c.name,deletion.start,deletion.end,x50s[entry_name]['5x50'])
fusion_icl3 = True
bw = x50s[entry_name]['5x50']-deletion.start
bw = "3x"+str(50-x50s[entry_name]['3x50']+deletion.start+track_anamalities[entry_name]['3'][1]-1)
bw_real = "3."+str(50-x50s[entry_name]['3x50']+deletion.start-1)
bw2 = "4x"+str(50-x50s[entry_name]['4x50']+deletion.end+track_anamalities[entry_name]['4'][0]+1)
bw2_real = "4."+str(50-x50s[entry_name]['4x50']+deletion.end+1)
# Make 1.50x50 number
# bw = bw_real+"x"+bw
# bw2 = bw2_real+"x"+bw2
bw_combine = bw+"-"+bw2
position = 'icl2'
del_length = 1+deletion.end-deletion.start
l_3_4_length = x50s[entry_name]['4x50']-x50s[entry_name]['3x50']
# print(fusion_position)
if fusion_position=='icl3' or fusion_position=='nterm_icl3':
position = 'icl2_fusion'
if bw not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw)
if bw2 not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw2)
if p_class_name not in truncations_new[position+'_start']:
truncations_new[position+'_start'][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position+'_start'][p_class_name]['receptors']:
truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb] = [[],[],[bw]]
if bw not in truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position+'_start'][p_class_name]['receptors'][entry_name_pdb][0].append(bw)
if p_class_name not in truncations_new[position+'_end']:
truncations_new[position+'_end'][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if entry_name_pdb not in truncations_new[position+'_end'][p_class_name]['receptors']:
truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb] = [[],[],[bw2]]
if bw not in truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb][0]:
truncations_new[position+'_end'][p_class_name]['receptors'][entry_name_pdb][0].append(bw2)
if bw not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw)
if bw2 not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw2)
if del_length not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['3_4_deleted'].append(del_length)
if l_3_4_length not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['3_4_length'].append(l_3_4_length)
if p_class_name not in truncations[position]:
truncations[position][p_class_name] = {}
if bw_combine not in truncations[position][p_class_name]:
truncations[position][p_class_name][bw_combine] = []
if entry_name_pdb not in truncations[position][p_class_name][bw_combine]:
truncations[position][p_class_name][bw_combine].append(entry_name_pdb)
if position+"_start" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_start"] = []
if position+"_end" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_end"] = []
if bw not in truncations_new_possibilties[position+"_start"]:
truncations_new_possibilties[position+"_start"].append(bw)
truncations_new_possibilties[position+"_start"] = sorted(truncations_new_possibilties[position+"_start"])
if bw2 not in truncations_new_possibilties[position+"_end"]:
truncations_new_possibilties[position+"_end"].append(bw2)
truncations_new_possibilties[position+"_end"] = sorted(truncations_new_possibilties[position+"_end"])
if fusions:
if track_fusions[p_class_name][entry_name_pdb] == {'found':[],'for_print':[], '3_4_length':[], '5_6_length':[], '3_4_deleted':[], '5_6_deleted':[]}:
if fusion_position=='nterm' or fusions[0][3].startswith('N-term'):
from_tm1 = tm1_start[entry_name]-1
# print(entry_name_pdb,'Seems to be without truncated N-term, fixme',tm1_start[entry_name])
position = 'nterm_fusion'
if from_tm1 not in truncations_new_possibilties[position]:
truncations_new_possibilties[position].append(from_tm1)
truncations_new_possibilties[position] = sorted(truncations_new_possibilties[position])
if from_tm1 not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(from_tm1)
if from_tm1 not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(from_tm1)
elif not fusions[0][3].startswith('C-term'):
# print(entry_name_pdb,'NOT FOUND CUT??',fusion_position,fusions)
deletion.start = fusions[0][4] #the next one is "cut"
deletion.end = fusions[0][4]+1 #the 'prev' is cut
if deletion.start > x50s[entry_name]['5x50'] and deletion.start < x50s[entry_name]['6x50']:
# if fusion_icl3:
# print(entry_name,c.name,deletion.start,deletion.end,x50s[entry_name]['5x50'])
fusion_icl3 = True
bw = x50s[entry_name]['5x50']-deletion.start
bw = "5x"+str(50-x50s[entry_name]['5x50']+deletion.start+track_anamalities[entry_name]['5'][1])
bw_real = "5."+str(50-x50s[entry_name]['5x50']+deletion.start)
bw2 = "6x"+str(50-x50s[entry_name]['6x50']+deletion.end+track_anamalities[entry_name]['6'][0])
bw2_real = "6."+str(50-x50s[entry_name]['6x50']+deletion.end)
# Make 1.50x50 number
# bw = bw_real+"x"+bw
# bw2 = bw2_real+"x"+bw2
bw_combine = bw+"-"+bw2
position = 'icl3'
del_length = 1+deletion.end-deletion.start
l_5_6_length = x50s[entry_name]['6x50']-x50s[entry_name]['5x50']
if fusion_position=='icl3' or fusion_position=='nterm_icl3':
position = 'icl3_fusion'
if bw not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw)
if bw2 not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw2)
if fusion_position=='icl3' or fusion_position=='nterm_icl3':
#Track those with fusion
if bw not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw)
if bw2 not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw2)
if del_length not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['5_6_deleted'].append(del_length)
if l_5_6_length not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['5_6_length'].append(l_5_6_length)
else:
print('ICL3 CUT WITHOUT FUSION',bw_combine,entry_name,c.name)
if p_class_name not in track_without_fusions:
track_without_fusions[p_class_name] = OrderedDict()
if entry_name_pdb not in track_without_fusions[p_class_name]:
track_without_fusions[p_class_name][entry_name_pdb] = {'found':[],'for_print':[]}
#Track those without fusion
if bw not in track_without_fusions[p_class_name][entry_name_pdb]['found']:
track_without_fusions[p_class_name][entry_name_pdb]['found'].append(bw)
if bw2 not in track_without_fusions[p_class_name][entry_name_pdb]['found']:
track_without_fusions[p_class_name][entry_name_pdb]['found'].append(bw2)
if p_class_name not in truncations[position]:
truncations[position][p_class_name] = {}
if bw_combine not in truncations[position][p_class_name]:
truncations[position][p_class_name][bw_combine] = []
if entry_name_pdb not in truncations[position][p_class_name][bw_combine]:
truncations[position][p_class_name][bw_combine].append(entry_name_pdb)
if position+"_start" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_start"] = []
if position+"_end" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_end"] = []
if bw not in truncations_new_possibilties[position+"_start"]:
truncations_new_possibilties[position+"_start"].append(bw)
truncations_new_possibilties[position+"_start"] = sorted(truncations_new_possibilties[position+"_start"])
if bw2 not in truncations_new_possibilties[position+"_end"]:
truncations_new_possibilties[position+"_end"].append(bw2)
truncations_new_possibilties[position+"_end"] = sorted(truncations_new_possibilties[position+"_end"])
if deletion.start > x50s[entry_name]['3x50'] and deletion.start < x50s[entry_name]['4x50']:
# if fusion_icl3:
# print(entry_name,c.name,deletion.start,deletion.end,x50s[entry_name]['5x50'])
fusion_icl3 = True
bw = x50s[entry_name]['5x50']-deletion.start
bw = "3x"+str(50-x50s[entry_name]['3x50']+deletion.start+track_anamalities[entry_name]['3'][1])
bw_real = "3."+str(50-x50s[entry_name]['3x50']+deletion.start)
bw2 = "4x"+str(50-x50s[entry_name]['4x50']+deletion.end+track_anamalities[entry_name]['4'][0])
bw2_real = "4."+str(50-x50s[entry_name]['4x50']+deletion.end)
# Make 1.50x50 number
# bw = bw_real+"x"+bw
# bw2 = bw2_real+"x"+bw2
bw_combine = bw+"-"+bw2
position = 'icl2'
del_length = deletion.end-deletion.start-1
l_3_4_length = x50s[entry_name]['4x50']-x50s[entry_name]['3x50']
if fusion_position=='icl3':
position = 'icl2_fusion'
if bw not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw)
if bw2 not in track_fusions2[fusion_name]['found']:
track_fusions2[fusion_name]['found'].append(bw2)
if bw not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw)
if bw2 not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['found'].append(bw2)
if del_length not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['3_4_deleted'].append(del_length)
if l_3_4_length not in track_fusions[p_class_name][entry_name_pdb]['found']:
track_fusions[p_class_name][entry_name_pdb]['3_4_length'].append(l_3_4_length)
if p_class_name not in truncations[position]:
truncations[position][p_class_name] = {}
if bw_combine not in truncations[position][p_class_name]:
truncations[position][p_class_name][bw_combine] = []
if entry_name_pdb not in truncations[position][p_class_name][bw_combine]:
truncations[position][p_class_name][bw_combine].append(entry_name_pdb)
if position+"_start" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_start"] = []
if position+"_end" not in truncations_new_possibilties:
truncations_new_possibilties[position+"_end"] = []
if bw not in truncations_new_possibilties[position+"_start"]:
truncations_new_possibilties[position+"_start"].append(bw)
truncations_new_possibilties[position+"_start"] = sorted(truncations_new_possibilties[position+"_start"])
if bw2 not in truncations_new_possibilties[position+"_end"]:
truncations_new_possibilties[position+"_end"].append(bw2)
truncations_new_possibilties[position+"_end"] = sorted(truncations_new_possibilties[position+"_end"])
else:
print(entry_name_pdb," is CTERM FUSION")
position = 'nterm'
if fusion_position=='nterm' or fusion_position=='nterm_icl3':
position = 'nterm_fusion'
if p_class_name not in truncations_new[position]:
truncations_new[position][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
# if entry_name=='aa2ar_human':
# print(found_nterm,entry_name,position,p_class_name)
from_tm1 = tm1_start[entry_name]-1
if not found_nterm:
if position not in truncations_new_sum:
truncations_new_sum[position] = {}
if p_class_name not in truncations_new_sum[position]:
truncations_new_sum[position][p_class_name] = {}
if from_tm1 not in truncations_new_sum[position][p_class_name]:
truncations_new_sum[position][p_class_name][from_tm1] = 0
#if full receptor in xtal
if entry_name_pdb not in truncations_new[position][p_class_name]['receptors']:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb] = [[],[from_tm1],[from_tm1]]
# add one for this position if it is first time receptor is mentioned
truncations_new_sum[position][p_class_name][from_tm1] += 1
else:
if from_tm1 not in truncations_new[position][p_class_name]['receptors'][entry_name_pdb][1]:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb][1].append(from_tm1)
truncations_new_sum[position][p_class_name][from_tm1] += 1
# else:
# #if full was found, fill in the max
# #print(entry_name,found_nterm)
# if from_tm1 not in truncations_new[position][p_class_name]['receptors'][entry_name][1]:
# truncations_new[position][p_class_name]['receptors'][entry_name][2].append(from_tm1)
if position!='nterm_fusion' and from_tm1 not in truncations_new_possibilties[position]:
truncations_new_possibilties[position].append(from_tm1)
truncations_new_possibilties[position] = sorted(truncations_new_possibilties[position])
position = 'cterm'
if p_class_name not in truncations_new[position]:
truncations_new[position][p_class_name] = {'receptors':OrderedDict(),'no_cut':[], 'possiblities':[]}
if position not in truncations_new_possibilties:
truncations_new_possibilties[position] = []
from_h8 = cterm_end[entry_name] - cterm_start[entry_name]+1
if not found_cterm:
if position not in truncations_new_sum:
truncations_new_sum[position] = {}
if p_class_name not in truncations_new_sum[position]:
truncations_new_sum[position][p_class_name] = {}
if from_h8 not in truncations_new_sum[position][p_class_name]:
truncations_new_sum[position][p_class_name][from_h8] = 0
if entry_name_pdb not in truncations_new[position][p_class_name]['receptors']:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb] = [[],[from_h8],[from_h8]]
# add one for this position if it is first time receptor is mentioned
truncations_new_sum[position][p_class_name][from_h8] += 1
else:
if from_h8 not in truncations_new[position][p_class_name]['receptors'][entry_name_pdb][1]:
truncations_new[position][p_class_name]['receptors'][entry_name_pdb][1].append(from_h8)
truncations_new_sum[position][p_class_name][from_h8] += 1
# else:
# if from_h8 not in truncations_new[position][p_class_name]['receptors'][entry_name][1]:
# truncations_new[position][p_class_name]['receptors'][entry_name][1].append(from_h8)
if from_h8 not in truncations_new_possibilties[position]:
truncations_new_possibilties[position].append(from_h8)
truncations_new_possibilties[position] = sorted(truncations_new_possibilties[position])
#print(truncations_new)
max_pos_range = {}
max_pos_range2 = {}
max_pos_range3 = {}
site_fusions = {}
for site in truncations_new:
# print(site)
max_pos_range[site] = 0
max_pos_range2[site] = [100,0,0]
max_pos_range3[site] = [0,0]
site_fusions[site] = []
for pclass, val in truncations_new[site].items():
# print(site,pclass)
unique_sites = OrderedDict()
sites = {}
distinct_fusion = {}
min_cut = 0
max_cut = 0
if site not in truncations_new_sum:
truncations_new_sum[site] = {}
if pclass not in truncations_new_sum[site]:
truncations_new_sum[site][pclass] = {}
for r,v in val['receptors'].items():
entry_name = "_".join(r.split("_")[:2])
original_entryname=entry_name
pdbcode = r.split("_")[2]
if len(v[0])>1:
print('multiple cuts?',entry_name,r,v[0])
cut = v[0][0] if v[0] else v[2][0]
if site in truncations_maximums:
if cut < min_cut:
min_cut = cut
if cut > max_cut:
max_cut = cut
# print(site,r,v,pdbcode,entry_name,cut)
entry_name += "_"+str(cut)
if entry_name not in unique_sites:
unique_sites[entry_name] = v
unique_sites[entry_name].append([]) #for pdbs
unique_sites[entry_name].append('') #for GPCR
unique_sites[entry_name].append('') #for Species
unique_sites[entry_name].append({'inactive':'','intermediate':'','active':''}) #for State
unique_sites[entry_name].append('') #for cut
unique_sites[entry_name].append(full_p_name[original_entryname])
unique_sites[entry_name].append([]) #for fusions
unique_sites[entry_name].append([]) #for linkers #10
if cut not in sites:
sites[cut] = 0
sites[cut] += 1
unique_sites[entry_name][3].append(pdbcode)
unique_sites[entry_name][4] = original_entryname.split("_")[0].upper()
unique_sites[entry_name][5] = original_entryname.split("_")[1].lower()
if unique_sites[entry_name][5]=='human':
unique_sites[entry_name][5] = ''
if unique_sites[entry_name][6][states[pdbcode]] != '':
unique_sites[entry_name][6][states[pdbcode]] += 1
else:
unique_sites[entry_name][6][states[pdbcode]] = 1
unique_sites[entry_name][7] = cut
if pdbcode in fusion_by_pdb:
if fusion_by_pdb[pdbcode] not in unique_sites[entry_name][9]:
unique_sites[entry_name][9].append(fusion_by_pdb[pdbcode])
if fusion_by_pdb[pdbcode] not in distinct_fusion:
distinct_fusion[fusion_by_pdb[pdbcode]] = 0
distinct_fusion[fusion_by_pdb[pdbcode]] += 1
if fusion_by_pdb[pdbcode] not in site_fusions[site]:
site_fusions[site].append(fusion_by_pdb[pdbcode])
if site=='icl3_fusion_start' and pdbcode in linkers_exist_before:
# print('FOUND',linkers_exist_before[pdbcode])
unique_sites[entry_name][10].append(str(linkers_exist_before[pdbcode]))
if site=='icl3_fusion_end' and pdbcode in linkers_exist_after:
# print('FOUND',linkers_exist_after[pdbcode])
unique_sites[entry_name][10].append(str(linkers_exist_after[pdbcode]))
# print(sites)
truncations_new_sum[site][pclass] = sites
if site in truncations_maximums:
unique_sites = OrderedDict(sorted(unique_sites.items(), key=lambda x: int(x[0].split("_")[-1])))
else:
unique_sites = OrderedDict(sorted(unique_sites.items(), key=lambda x: x[0].split("_")[-1]))
val['range'] = sorted(list(sites.keys()))
first_range = val['range'][0]
last_range = val['range'][-1]
prefix = val['range'][0].split('x')[0]
start = int(val['range'][0].split('x')[1])
end = int(val['range'][-1].split('x')[1])+1
max_pos_range2[site][2] = prefix
if start < max_pos_range2[site][0]:
max_pos_range2[site][0] = start
if end > max_pos_range2[site][1]:
max_pos_range2[site][1] = end
# print('\n ### doing range',site, sites,max_pos_range2[site],val['range'])
val['receptors'] = unique_sites
val['fusions'] = distinct_fusion
if site in truncations_maximums:
val['range'] = list(range(min_cut,truncations_maximums[site][pclass]+1))
if min_cut < max_pos_range3[site][0]:
max_pos_range3[site][0] = min_cut
if max_cut > max_pos_range3[site][1]:
max_pos_range3[site][1] = max_cut
if 'fusion' in site:
val['range'] = list(range(min_cut,max_cut+1))
if len(val['range'])>300:
val['range'] = val['range'][::2]
if len(val['range'])>max_pos_range[site]:
max_pos_range[site] = len(val['range'])
# Add offset to align tables
for site in truncations_new:
for pclass, val in truncations_new[site].items():
for recp, rval in val['receptors'].items():
if rval[10]:
# print(recp,rval[10])
if len(rval[10])!=len(rval[3]): #if pdbs with linker is not same as amount of linkers
rval[10].append('0')
rval[10] = ','.join(list(set(rval[10])))
else:
rval[10] = '' #no linkers
temp = {}
for fusion in site_fusions[site]:
if fusion in val['fusions']:
temp[fusion] = val['fusions'][fusion]
else:
temp[fusion] = ''
val['fusions'] = temp
if 'range' in val:
if len(val['range'])<max_pos_range[site]:
val['range'] = val['range'] + [5000] * (max_pos_range[site]-len(val['range']))
if site in truncations_maximums and 'fusion' in site:
val['range'] = list(range(max_pos_range3[site][0],max_pos_range3[site][1]+1))
if max_pos_range2[site][2] != 0:
temp = []
for x in range(max_pos_range2[site][0],max_pos_range2[site][1]):
temp.append(max_pos_range2[site][2]+"x"+str(x))
val['range'] = temp
temp = []
for x in val['range']:
if x in truncations_new_sum[site][pclass]:
temp.append(truncations_new_sum[site][pclass][x])
else:
temp.append('')
val['sum'] = temp
# print(linkers_exist_before,linkers_exist_after)
# print("NEWCHECK",truncations_new['icl3_start'])
for pos, p_vals in truncations_new_sum.items():
for pclass, c_vals in p_vals.items():
new_list = OrderedDict()
for position in truncations_new_possibilties[pos]:
if position in c_vals:
new_list[position] = c_vals[position]
else:
new_list[position] = ''
# print(pclass,c_vals,new_list)
if pos!='cterm':
truncations_new_sum[pos][pclass] = OrderedDict(reversed(list(new_list.items())))
else:
truncations_new_sum[pos][pclass] = OrderedDict(list(new_list.items()))
# print(truncations_new)
#truncations = OrderedDict(truncations)
ordered_truncations = OrderedDict()
for segment, s_vals in sorted(truncations.items()):
#print(segment)
ordered_truncations[segment] = OrderedDict()
for p_class, c_vals in sorted(s_vals.items()):
#print(p_class)
ordered_truncations[segment][p_class] = OrderedDict()
for pos, p_vals in sorted(c_vals.items(),key=lambda x: (len(x[1]),x[0]), reverse=True):
#print(pos, len(p_vals))
ordered_truncations[segment][p_class][pos] = p_vals
fusion_possibilities = truncations_new_possibilties['nterm_fusion'][::-1] + ['_'] + truncations_new_possibilties['icl2_fusion_start'] + ['3_4_length'] + ['3_4_deleted'] + truncations_new_possibilties['icl2_fusion_end'] + ['.'] + truncations_new_possibilties['icl3_fusion_start'] + ['5_6_length'] + ['5_6_deleted'] + truncations_new_possibilties['icl3_fusion_end']
# fusion_possibilities = truncations_new_possibilties['nterm_fusion'][::-1] + truncations_new_possibilties['icl3_start'] + truncations_new_possibilties['icl3_end']
# print('fusion_possibilities',fusion_possibilities)
track_fusion_sums = OrderedDict()
track_without_fusion_sums = OrderedDict()
for pclass, receptors in track_fusions.items():
track_fusion_sums[pclass] = OrderedDict()
for p in fusion_possibilities:
track_fusion_sums[pclass][p] = 0
for receptor, vals in receptors.items():
temp = []
for p in fusion_possibilities:
if p in vals['found']:
temp.append('C')
track_fusion_sums[pclass][p] += 1
elif p=='3_4_length' and vals['3_4_length']:
temp.append(vals['3_4_length'][0])
elif p=='3_4_deleted' and vals['3_4_deleted']:
temp.append(vals['3_4_deleted'][0])
elif p=='5_6_length' and vals['5_6_length']:
temp.append(vals['5_6_length'][0])
elif p=='5_6_deleted' and vals['5_6_deleted']:
temp.append(vals['5_6_deleted'][0])
else:
temp.append(0)
vals['for_print'] = temp
# print(receptor,vals)
for pclass, receptors in track_without_fusions.items():
track_without_fusion_sums[pclass] = OrderedDict()
for p in truncations_new_possibilties['icl3_start'] + ['5_6_length'] + ['5_6_deleted'] + truncations_new_possibilties['icl3_end']:
track_without_fusion_sums[pclass][p] = 0
for receptor, vals in receptors.items():
temp = []
for p in truncations_new_possibilties['icl3_start'] + ['5_6_length'] + ['5_6_deleted'] + truncations_new_possibilties['icl3_end']:
if p in vals['found']:
temp.append('C')
track_without_fusion_sums[pclass][p] += 1
elif p=='5_6_length' and vals['5_6_length']:
temp.append(vals['5_6_length'][0])
elif p=='5_6_deleted' and vals['5_6_deleted']:
temp.append(vals['5_6_deleted'][0])
else:
temp.append(0)
vals['for_print'] = temp
# print(track_fusion_sums)
for fusion, vals in track_fusions2.items():
temp = []
for p in fusion_possibilities:
# print(p)
if p in vals['found']:
temp.append(1)
else:
temp.append("")
vals['for_print'] = temp
# print(track_without_fusions)
#truncations = OrderedDict(sorted(truncations.items(), key=lambda x: x[1]['hits'],reverse=True))
#print(ordered_truncations)
# print(track_fusions2)
context['truncations'] = ordered_truncations
context['truncations_new'] = truncations_new
context['truncations_new_possibilties'] = truncations_new_possibilties
context['truncations_new_sum'] = truncations_new_sum
context['fusion_possibilities'] = fusion_possibilities
context['test'] = track_fusions
context['test2'] = track_fusions2
context['track_fusion_sums'] = track_fusion_sums
context['track_without_fusions'] = track_without_fusions
mutation_list = OrderedDict()
mutation_type = OrderedDict()
mutation_wt = OrderedDict()
mutation_mut = OrderedDict()
mutation_matrix = OrderedDict()
mutation_track = []
aa_list = list(AMINO_ACIDS.keys())[:20]
mutation_matrix_sum_mut = OrderedDict()
#print(aa_list)
for i, mut in enumerate(AMINO_ACIDS):
if i==20:
break
mutation_matrix[mut] = OrderedDict()
for aa in aa_list:
mutation_matrix[mut][aa] = [0,0]
mutation_matrix[mut][mut] = [0,'-']
mutation_matrix[mut]['sum'] = [0,0]
mutation_matrix_sum_mut[mut] = [0,0]
#print(mutation_matrix)
for mutation in mutations:
wt = mutation[0].wild_type_amino_acid
mut = mutation[0].mutated_amino_acid
entry_name = mutation[1]
pos = mutation[0].sequence_number
p_class = mutation[3]
p_class = class_names[p_class]
pdb = mutation[2]
mut_uniq = entry_name+'_'+str(pos)+'_'+wt+'_'+mut
if mut_uniq not in mutation_track:
# print(mut_uniq)
#do not count the same mutation (from different Xtals) multiple times
mutation_track.append(mut_uniq)
mutation_matrix[wt][mut][1] += 1
mutation_matrix[wt][mut][0] = min(1,round(mutation_matrix[wt][mut][1]/30,2))
mutation_matrix[wt]['sum'][1] += 1
mutation_matrix[wt]['sum'][0] = min(1,round(mutation_matrix[wt]['sum'][1]/30,2))
mutation_matrix_sum_mut[mut][1] += 1
mutation_matrix_sum_mut[mut][0] = min(1,round(mutation_matrix_sum_mut[mut][1]/30,2))
gn = ''
if entry_name in rs_lookup and pos in rs_lookup[entry_name]:
if rs_lookup[entry_name][pos].generic_number:
gn = rs_lookup[entry_name][pos].generic_number.label
# print(entry_name,"\t", pdb,"\t",gn,"\t", pos,"\t", wt,"\t", mut)
if p_class not in mutation_type:
mutation_type[p_class] = OrderedDict()
if wt+"=>"+mut not in mutation_type[p_class]:
mutation_type[p_class][wt+"=>"+mut] = {'hits':0, 'proteins':[]}
if entry_name not in mutation_type[p_class][wt+"=>"+mut]['proteins']:
mutation_type[p_class][wt+"=>"+mut]['proteins'].append(entry_name)
mutation_type[p_class][wt+"=>"+mut]['hits'] += 1
if p_class not in mutation_wt:
mutation_wt[p_class] = OrderedDict()
if wt not in mutation_wt[p_class]:
mutation_wt[p_class][wt] = {'hits':0, 'proteins':[]}
if entry_name not in mutation_wt[p_class][wt]['proteins']:
mutation_wt[p_class][wt]['proteins'].append(entry_name)
mutation_wt[p_class][wt]['hits'] += 1
if p_class not in mutation_mut:
mutation_mut[p_class] = OrderedDict()
if mut not in mutation_mut[p_class]:
mutation_mut[p_class][mut] = {'hits':0, 'proteins':[]}
if entry_name not in mutation_mut[p_class][mut]['proteins']:
mutation_mut[p_class][mut]['proteins'].append(entry_name)
mutation_mut[p_class][mut]['hits'] += 1
if entry_name not in rs_lookup:
continue
if pos not in rs_lookup[entry_name]:
continue
gn = rs_lookup[entry_name][pos].generic_number.label
if p_class not in mutation_list:
mutation_list[p_class] = OrderedDict()
if gn not in mutation_list[p_class]:
mutation_list[p_class][gn] = {'proteins':[], 'hits':0, 'mutation':[]}
if entry_name not in mutation_list[p_class][gn]['proteins']:
mutation_list[p_class][gn]['proteins'].append(entry_name)
mutation_list[p_class][gn]['hits'] += 1
mutation_list[p_class][gn]['mutation'].append((mutation[0].wild_type_amino_acid,mutation[0].mutated_amino_acid))
mutation_matrix_total_sum = sum([v[1] for k,v in mutation_matrix_sum_mut.items()])
for p_class, values in mutation_list.items():
for gn, vals in values.items():
if vals['hits']<2:
pass
#values.pop(gn, None)
mutation_list[p_class] = OrderedDict(sorted(values.items(), key=lambda x: x[1]['hits'],reverse=True))
#mutation_list = OrderedDict(sorted(mutation_list.items(), key=lambda x: x[1]['hits'],reverse=True))
for p_class, values in mutation_type.items():
mutation_type[p_class] = OrderedDict(sorted(values.items(), key=lambda x: x[1]['hits'],reverse=True))
for p_class, values in mutation_wt.items():
mutation_wt[p_class] = OrderedDict(sorted(values.items(), key=lambda x: x[1]['hits'],reverse=True))
for p_class, values in mutation_mut.items():
mutation_mut[p_class] = OrderedDict(sorted(values.items(), key=lambda x: x[1]['hits'],reverse=True))
context['mutation_list'] = mutation_list
context['mutation_type'] = mutation_type
context['mutation_wt'] = mutation_wt
context['mutation_mut'] = mutation_mut
context['mutation_matrix'] = mutation_matrix
context['mutation_matrix_sum_mut'] = mutation_matrix_sum_mut
context['mutation_matrix_total_sum'] = mutation_matrix_total_sum
context['rs_annotations'] = rs_annotations
for c in cons:
pass
cache_temp = {}
cache_temp['truncations'] = ordered_truncations
cache_temp['truncations_new'] = truncations_new
cache_temp['truncations_new_possibilties'] = truncations_new_possibilties
cache_temp['truncations_new_sum'] = truncations_new_sum
cache_temp['fusion_possibilities'] = fusion_possibilities
cache_temp['test'] = track_fusions
cache_temp['test2'] = track_fusions2
cache_temp['track_fusion_sums'] = track_fusion_sums
cache_temp['track_without_fusions'] = track_without_fusions
cache_temp['mutation_list'] = mutation_list
cache_temp['mutation_type'] = mutation_type
cache_temp['mutation_wt'] = mutation_wt
cache_temp['mutation_mut'] = mutation_mut
cache_temp['mutation_matrix'] = mutation_matrix
cache_temp['mutation_matrix_sum_mut'] = mutation_matrix_sum_mut
cache_temp['rs_annotations'] = rs_annotations
cache.set('construct_statistics', cache_temp, 60*60*24*2) #two days
return context
class ConstructTable(TemplateView):
    """
    Fetching construct data for browser
    """

    template_name = "construct/residuetable.html"

    def get_context_data(self, **kwargs):
        """Assemble the residue-table context.

        Gathers every construct mutation, resolves each to its residue and
        generic number, computes per-class amino-acid and feature frequency
        statistics from a sequence alignment, and exposes a flat
        ``mutation_list`` (plus the amino-acid constants) to the template.
        """
        context = super(ConstructTable, self).get_context_data(**kwargs)
        cons = Construct.objects.all().prefetch_related(
            "crystal","mutations","purification","protein__family__parent__parent__parent", "insertions__insert_type", "modifications", "deletions", "crystallization__chemical_lists",
            "protein__species","structure__pdb_code","structure__publication__web_link", "contributor")

        # Collect mutations per construct, plus the positions and proteins
        # needed to fetch the matching Residue rows in a single query.
        mutations = []
        positions = []
        proteins = []
        class_names = {}
        classes = []
        for c in cons:
            p = c.protein
            entry_name = p.entry_name
            p_class = p.family.slug.split('_')[0]
            if p_class not in classes:
                classes.append(p_class)
            pdb = c.crystal.pdb_code
            for mutation in c.mutations.all():
                if p.entry_name not in proteins:
                    proteins.append(entry_name)
                # Carry the protein object in the tuple so the class display
                # name can later be resolved from the mutation's OWN protein
                # (previously it used whatever protein this loop ended on,
                # which could belong to a different class).
                mutations.append((mutation, entry_name, pdb, p_class, c.name, p))
                if mutation.sequence_number not in positions:
                    positions.append(mutation.sequence_number)

        rs = Residue.objects.filter(protein_conformation__protein__entry_name__in=proteins, sequence_number__in=positions).prefetch_related('generic_number','protein_conformation__protein','protein_segment')

        align_segments = ProteinSegment.objects.all().filter(slug__in = list(settings.REFERENCE_POSITIONS.keys())).prefetch_related()

        # Per-class alignment statistics keyed by generic number, plus the
        # generic numbers belonging to each class's binding-pocket set.
        amino_acids_stats = {}
        amino_acids_groups_stats = {}
        accessible_in_class = {}
        for c in classes:
            amino_acids_stats[c] = {}
            amino_acids_groups_stats[c] = {}
            accessible_in_class[c] = []

            # Binding-pocket residue sets only exist for classes A, C and F.
            if c == '001':
                residue_set_name = 'Class A binding pocket'
            elif c == '004':
                residue_set_name = 'Class C binding pocket'
            elif c == '005':
                residue_set_name = 'Class F binding pocket'
            else:
                residue_set_name = ''
            if residue_set_name:
                rset = ResiduePositionSet.objects.get(name=residue_set_name)
                for residue in rset.residue_position.all():
                    accessible_in_class[c].append(residue.label)

            alignment_proteins = Protein.objects.filter(family__slug__startswith=c, species__common_name='Human', source__name='SWISSPROT')
            a = Alignment()
            a.load_proteins(alignment_proteins)
            a.load_segments(align_segments)  # all segments, to make correct diagrams
            # build the alignment data matrix
            a.build_alignment()
            # calculate consensus sequence + amino acid and feature frequency
            a.calculate_statistics()

            # Flatten the alignment's per-position statistics into lists keyed
            # by generic number; s_id walks segments, a_id alignment columns.
            s_id = 0
            a_id = 0
            for ns, segments in a.generic_numbers.items():
                for s, num in segments.items():
                    for n, dn in num.items():
                        temp = []
                        temp2 = []
                        for i, aa in enumerate(AMINO_ACIDS):
                            temp.append(a.amino_acid_stats[i][s_id][a_id])
                        for i, aa in enumerate(AMINO_ACID_GROUPS):
                            temp2.append(a.feature_stats[i][s_id][a_id])
                        amino_acids_stats[c][n] = temp
                        amino_acids_groups_stats[c][n] = temp2
                        a_id += 1
                    s_id += 1

        # Index residues by entry name and sequence number for O(1) lookup.
        rs_lookup = {}
        for r in rs:
            entry_name = r.protein_conformation.protein.entry_name
            pos = r.sequence_number
            if entry_name not in rs_lookup:
                rs_lookup[entry_name] = {}
            if pos not in rs_lookup[entry_name]:
                rs_lookup[entry_name][pos] = r

        # Count distinct hits per generic number across all mutations.
        count_per_gn = {}
        for mutation in mutations:
            entry_name = mutation[1]
            pos = mutation[0].sequence_number
            # Skip residues missing from the lookup (same guard as the
            # list-building loop below); previously this raised KeyError.
            if entry_name not in rs_lookup or pos not in rs_lookup[entry_name]:
                continue
            if rs_lookup[entry_name][pos].generic_number:
                gn = rs_lookup[entry_name][pos].generic_number.label
                if gn not in count_per_gn:
                    count_per_gn[gn] = {'hits': 0, 'proteins': []}
                if entry_name not in count_per_gn[gn]['proteins']:
                    count_per_gn[gn]['proteins'].append(entry_name)
                count_per_gn[gn]['hits'] += 1

        mutation_list = []
        for mutation in mutations:
            wt = mutation[0].wild_type_amino_acid
            mut = mutation[0].mutated_amino_acid
            entry_name = mutation[1]
            pdb = mutation[2]
            cname = mutation[4]
            pos = mutation[0].sequence_number
            p_class = mutation[3]
            p = mutation[5]
            if p_class not in class_names:
                # Resolve the class display name from the mutation's own protein.
                class_names[p_class] = p.family.parent.parent.parent.name
            p_class = class_names[p_class]
            if entry_name not in rs_lookup:
                continue
            if pos not in rs_lookup[entry_name]:
                continue
            segment = rs_lookup[entry_name][pos].protein_segment.slug
            if rs_lookup[entry_name][pos].generic_number:
                gn = rs_lookup[entry_name][pos].generic_number.label
                stats = amino_acids_stats[mutation[3]][gn]
                stats2 = amino_acids_groups_stats[mutation[3]][gn]
                if gn in accessible_in_class[mutation[3]]:
                    accessible = 'yes'
                else:
                    accessible = 'no'
                count = count_per_gn[gn]['hits']
            else:
                gn = ''
                stats = ''
                stats2 = ''
                accessible = 'N/A'
                # BUG FIX: 'count' was previously left unset here, causing a
                # NameError on the first non-GN mutation (or a stale value
                # carried over from an earlier iteration).
                count = ''
            mutation_list.append({'entry_name':entry_name,'pdb':pdb,'cname':cname, 'segment':segment,'pos': pos, 'gn': gn, 'wt': wt, 'mut': mut,'p_class': p_class, 'amino_acids':stats, 'amino_acids_groups':stats2, 'accessible': accessible, 'count': count})

        context['amino_acids'] = AMINO_ACIDS
        context['amino_groups'] = AMINO_ACID_GROUPS
        context['mutation_list'] = mutation_list
        return context
class ConstructMutations(TemplateView):
    """
    Fetching construct data for browser
    """

    template_name = "construct/mutations.html"

    def get_context_data(self, **kwargs):
        """Assemble the construct-mutations context.

        Groups every construct mutation by (receptor, position, mutated AA),
        annotating each group with its segment, generic number, mutation
        effect types and the PDB codes it occurs in. The grouped dict is
        exposed as ``mutation_list``, with all observed effect slugs in
        ``overall_mut_types``.
        """
        context = super(ConstructMutations, self).get_context_data(**kwargs)
        cons = Construct.objects.all().prefetch_related(
            "crystal","mutations__effects","purification","protein__family__parent__parent__parent", "insertions__insert_type", "modifications", "deletions", "crystallization__chemical_lists",
            "protein__species","structure__pdb_code","structure__publication__web_link", "contributor")

        # Collect mutations per construct, plus the positions and proteins
        # needed to fetch the matching Residue rows in a single query.
        mutations = []
        positions = []
        proteins = []
        class_names = {}
        for c in cons:
            p = c.protein
            entry_name = p.entry_name
            p_class = p.family.slug.split('_')[0]
            pdb = c.crystal.pdb_code
            for mutation in c.mutations.all():
                if p.entry_name not in proteins:
                    proteins.append(entry_name)
                mutations.append((mutation, entry_name, pdb, p_class, c.name, p))
                if mutation.sequence_number not in positions:
                    positions.append(mutation.sequence_number)

        rs = Residue.objects.filter(protein_conformation__protein__entry_name__in=proteins, sequence_number__in=positions).prefetch_related('generic_number','protein_conformation__protein','protein_segment')

        # Index residues by entry name and sequence number for O(1) lookup.
        rs_lookup = {}
        for r in rs:
            entry_name = r.protein_conformation.protein.entry_name
            pos = r.sequence_number
            if entry_name not in rs_lookup:
                rs_lookup[entry_name] = {}
            if pos not in rs_lookup[entry_name]:
                rs_lookup[entry_name][pos] = r

        # Group identical mutations (same receptor/position/mutant AA) from
        # different crystals into one row, accumulating types and PDB codes.
        # (The old per-crystal flat list was built here too but never used.)
        new_mutations = {}
        overall_mut_types = set()
        for mutation in mutations:
            wt = mutation[0].wild_type_amino_acid
            mut = mutation[0].mutated_amino_acid
            mut_types = []
            for eff in mutation[0].effects.all().values('slug'):
                mut_types.append(eff['slug'])
                overall_mut_types.add(eff['slug'])
            entry_name = mutation[1]
            pdb = mutation[2]
            cname = mutation[4]
            pos = mutation[0].sequence_number
            p_class = mutation[3]
            p = mutation[5]
            if p_class not in class_names:
                class_names[p_class] = p.family.parent.parent.parent.short
            p_class = class_names[p_class]
            entry_short = p.entry_short
            receptor_short = p.short
            # Skip mutations whose residue could not be resolved.
            if entry_name not in rs_lookup:
                continue
            if pos not in rs_lookup[entry_name]:
                continue
            segment = rs_lookup[entry_name][pos].protein_segment.slug
            if rs_lookup[entry_name][pos].generic_number:
                gn = rs_lookup[entry_name][pos].generic_number.label
            else:
                gn = ''
            key = mutation[1] + "_" + str(mutation[0].sequence_number) + "_" + mutation[0].mutated_amino_acid
            if key not in new_mutations:
                new_mutations[key] = {'entry_name':entry_short,'receptor_short':receptor_short,'cname':cname, 'segment':segment,'pos': pos, 'gn': gn, 'wt': wt, 'mut': mut,'p_class': p_class, 'type': set(), 'pdbs': set()}
            new_mutations[key]['type'].update(mut_types)
            new_mutations[key]['pdbs'].add(pdb)

        context['mutation_list'] = new_mutations
        context['overall_mut_types'] = overall_mut_types
        return context
def stabilisation_browser(request):
    '''View to display and summarise mutation data for thermostabilising mutational constructs.

    Builds four parallel groupings of the thermostabilising construct mutations
    ("all", "pos_and_wt", "pos_and_mut", "position_only") and renders them with
    the stabilisation browser template.
    '''
    # Set up: Restructure the STRUCTURAL_RULES for the constructs into a crude tree-like
    # structure to enable quick and concise searching within the for loops below.
    structural_rule_tree = create_structural_rule_trees(STRUCTURAL_RULES)
    # Get a list of all constructs.
    constructs = Construct.objects.all()\
        .order_by().only(
            "protein__entry_name",
            "protein__family__slug",
            "protein__family__parent__parent__parent__name",
            "structure__state__name",
            "crystal__pdb_code")\
        .prefetch_related(
            "structure__state",
            "mutations__residue__generic_number",
            "mutations__residue__protein_segment",
            "mutations__effects__bar",
            "protein__family__parent__parent__parent",
            "crystal")
    # Get a list of all relevant protein classes and generic numbers.
    conservation_proteins = constructs.values_list('protein__family__parent__parent__parent__name',
                                                   flat=True)\
                                      .distinct()
    conservation_gen_nums = constructs.values_list('mutations__residue__generic_number__label', flat=True).distinct()
    # Calculate the conservation values for the mutations across their receptor families and
    # protein classes.  Alignment performed using generic numbers.
    conservation = conservation_table(conservation_proteins, conservation_gen_nums)
    # For each analysis mode, define which fields form the unique row id ('include_in_id')
    # and which per-mutant fields are not unique across the group and therefore must be
    # dropped from the row info ('exclude_from_info').
    groupings = {
        "all":{"include_in_id":['class', 'gen_num', 'wild_type', 'mutant'], "exclude_from_info":['']},
        "pos_and_wt":{"include_in_id":['class', 'gen_num', 'wild_type'],
                      "exclude_from_info":['ala_leu_subset', 'ala_subset', 'mutant']},
        "pos_and_mut":{"include_in_id":['class', 'gen_num', 'mutant'],
                       "exclude_from_info":['ala_leu_subset', 'wild_type']},
        "position_only":{"include_in_id":['class', 'gen_num'],
                         "exclude_from_info":['ala_leu_subset', 'ala_subset', 'wild_type', 'mutant']}
    }
    # One result dictionary per analysis mode.
    mutation_groups = {"position_only":{}, "all":{}, "pos_and_wt":{}, "pos_and_mut":{}}
    # Grab thermostabilising mutations.
    mutations_thermo = ConstructMutation.objects.filter(effects__slug='thermostabilising').all()\
        .prefetch_related(
            "construct__structure__state",
            "residue__generic_number",
            "residue__protein_segment",
            "construct__protein__family__parent__parent__parent",
            "construct__crystal")
    # For each mutation, collect the needed information and add it to every grouping.
    for mutant in mutations_thermo:
        # Get info for the construct.
        struct_id = mutant.construct.structure_id
        state = mutant.construct.structure.state.name
        prot = mutant.construct.protein
        # NOTE(review): short() is called here while other views in this file read
        # `.short` as an attribute -- presumably different model types; confirm
        # against the protein family model.
        p_class = prot.family.parent.parent.parent.short()
        p_ligand = prot.family.parent.parent.short()
        p_receptor = prot.family.parent.short()
        real_receptor = prot.entry_short
        real_receptor_iuphar = prot.short()
        pdb = mutant.construct.crystal.pdb_code
        # Get the generic number and segment, if known; em dash placeholders otherwise.
        try:
            if mutant.residue.generic_number is None:
                generic_number = u'\u2014'
            else:
                generic_number = mutant.residue.generic_number.label
            segment = mutant.residue.protein_segment.slug
        except AttributeError:
            generic_number = u'\u2014'
            segment = u'\u2014'
        # Fields used to create a unique group id, and the info relevant to the full row.
        mutant_id = {'gen_num':generic_number, 'wild_type':mutant.wild_type_amino_acid,
                     'mutant':mutant.mutated_amino_acid, 'GPCR_count':0, 'segment':segment, 'class': p_class}
        mutant_info = {'pdb':pdb,
                       'ligand': p_ligand,
                       'receptor': p_receptor,
                       'real_receptor': real_receptor,
                       'real_receptor_iuphar': real_receptor_iuphar,
                       'wild_type':mutant_id["wild_type"],
                       'mutant':mutant_id['mutant'],
                       'state':state,
                       'struct_id':struct_id}
        # The calculated columns only need recomputing when this (class, gn, wt, mut)
        # combination is new.  If the 'all' group already exists, every coarser grouping
        # exists too, so a stale calced_cols value is never read below.
        wt_mut_group_id = ",".join([str(val) for key, val in mutant_id.items()
                                    if key in groupings['all']['include_in_id']])
        if wt_mut_group_id not in mutation_groups['all']:
            calced_cols = get_calculated_columns(structural_rule_tree,
                                                 mutant_id['mutant'],
                                                 mutant_id['wild_type'],
                                                 generic_number,
                                                 p_class,
                                                 p_receptor,
                                                 conservation)
        # For each group, add the required info.
        for group_name, attr in groupings.items():
            # Information pertaining to the whole group to which the mutant belongs.
            group_info = {key:item for key, item in mutant_id.items() if key not in attr['exclude_from_info']}
            # Create a group ID (which will be unique within each grouping).
            group_id = ",".join([str(val) for key, val in mutant_id.items()
                                 if key in attr['include_in_id']])
            # Get the row for this group; if none exists yet, create one with group_info.
            group = mutation_groups[group_name].setdefault(group_id,
                                                           [group_info, {}]
                                                          )
            # Newly created rows get the Frequency and Conservation columns.
            if group[1] == {}:
                group[0]['propensity'],\
                group[0]['hydro'],\
                group[0]["class_cons"],\
                group[0]["receptor_fam_cons"],\
                group[0]["ionic_lock"],\
                group[0]["sodium_ion"],\
                group[0]["res_switch"]\
                    = calced_cols[group_name]
            # Flag the alanine subset to allow fast mutation subset filtering.
            if group_name == "all":
                if mutant_id['mutant'] == 'A':
                    in_ala_subset = 'ala_subset'
                elif mutant_id['wild_type'] == 'A' and mutant_id['mutant'] == 'L':
                    in_ala_subset = 'ala_subset'
                else:
                    in_ala_subset = 'no_subset'
                group[0]['ala_subset'] = in_ala_subset
            # Count the number of construct mutations recorded in the row.
            group[0]['GPCR_count'] += 1
            # Per-mutant info that is not already part of the group id.
            info = {key:set((item,)) for key, item in mutant_info.items() if key not in attr['include_in_id']}
            if group[1] == {}:
                # Initialise the dict with the first mutant.
                group[1].update(info)
            else:
                # Merge the specific mutant info into the row.
                for key, item in info.items():
                    group[1][key].update(item)
            # Remove receptor family conservation info if row refers to >1 receptor family.
            if len(group[1]['receptor']) != 1:
                group[0]["receptor_fam_cons"] = u'\u2014'
    # Send the context dictionary to the template to be rendered.
    return render(request, "construct/stabilisation_browser.html",
                  {'pos_and_mut': mutation_groups['pos_and_mut'],
                   'pos_and_wt': mutation_groups['pos_and_wt'],
                   'all': mutation_groups['all'],
                   'position_only': mutation_groups["position_only"]})
def conservation_table(prot_classes, gen_nums):
    '''Build the conservation lookup used by the thermostabilisation view.

    Returns a dict keyed by (protein class name or receptor family name,
    generic number label); each value maps amino acid -> frequency at that
    position, alongside a 'total' residue count for the row.
    '''
    table = {}
    # Count residues grouped by amino acid, generic number, receptor family and
    # protein class, restricted to the requested classes and positions and to
    # proteins with species_id 1 and source_id 1.
    residues = (Residue.objects.order_by()
                .only(
                    "amino_acid",
                    "generic_number__label",
                    "protein_conformation__protein__species_id",
                    "protein_conformation__protein__source_id",
                    "protein_conformation__protein__family__parent__parent__parent__name")
                .prefetch_related(
                    "protein_conformation__protein__family__parent__parent__parent",
                    "protein_conformation__protein__species",
                    "protein_conformation__protein__source",
                    "generic_number")
                .filter(
                    protein_conformation__protein__family__parent__parent__parent__name__in=list(prot_classes),
                    protein_conformation__protein__species_id="1",
                    protein_conformation__protein__source_id="1",
                    generic_number__label__in=list(gen_nums))
                .values(
                    'amino_acid',
                    'protein_conformation__protein__family__parent__parent__parent__name',
                    "protein_conformation__protein__family__parent__name",
                    "generic_number__label")
                .annotate(Count('amino_acid')))
    # Accumulate counts into one row per (class, gn) and one per (family, gn).
    for entry in residues:
        count = entry['amino_acid__count']
        acid = entry['amino_acid']
        label = entry['generic_number__label']
        row_keys = (entry['protein_conformation__protein__family__parent__parent__parent__name'],
                    entry['protein_conformation__protein__family__parent__name'])
        for group in row_keys:
            row = table.setdefault((group, label), {'total': 0})
            row['total'] += count
            row[acid] = row.get(acid, 0) + count
    # Convert each count into a frequency across its row.
    for row in table.values():
        total = row['total']
        for acid in row:
            if acid != 'total':
                row[acid] = round(row[acid] / total, 2)
    return table
def get_calculated_columns(rule_tree, mutant, wild_type, g_n, prot_class, rec_fam, conservation): # pylint: disable=too-many-arguments
    '''Calculate the propensity, hydrophobicity and site info for the given
    wild type / mutant pair, once per grouping / analysis mode.'''
    # Conservation rows for the protein class and the receptor family.
    class_cons = conservation.get((prot_class, g_n), {})
    fam_cons = conservation.get((rec_fam, g_n), {})
    # Single class letter extracted from the class name -- presumably of the
    # form 'Class A ...', matching the tree keys 'A'/'B'/'C'/'F'.
    class_letter = prot_class[6]
    # Slice of each structural rule tree relevant to this class and position
    # (and hence to all groupings).
    related_rules = {tree_name: rule_tree[tree_name].get(class_letter, {}).get(g_n, {})
                     for tree_name in ('ionic_lock_tree', 'sodium_ion_tree', 'residue_switch_tree')}
    # One (data columns, site columns) tuple per analysis mode.
    return {
        'position_only': get_data_pos_grouping(related_rules),
        'pos_and_mut': get_data_mut_grouping(related_rules, mutant, class_cons, fam_cons),
        'pos_and_wt': get_data_wt_grouping(related_rules, wild_type, class_cons, fam_cons),
        'all': get_data_all_grouping(related_rules, mutant, wild_type, class_cons, fam_cons)
    }
def get_data_pos_grouping(rules):
    '''
    Calculate the Data and Site columns in the browser view for the
    position-only analysis mode.
    '''
    dash = u'\u2014'
    # A non-empty rule subtree means some rule applies at this position
    # (an empty dictionary evaluates to False).
    sites = tuple('Pos Match' if rules[tree] else dash
                  for tree in ('ionic_lock_tree', 'sodium_ion_tree', 'residue_switch_tree'))
    # There is no mutant or wild type info, so the four data columns are dashes.
    return (dash, dash, dash, dash) + sites
def get_data_mut_grouping(rules, mutant, class_cons, fam_cons):
    '''
    Calculate the Data and Site columns in the browser view for the
    pos & mut analysis mode.
    '''
    dash = u'\u2014'

    def site_status(tree):
        # Status of one site column: dash when no rule applies to the class,
        # position and gn (an empty dict evaluates to False), otherwise
        # whether a rule exists for this specific mutant amino acid.
        if not tree:
            return dash
        if tree.get(mutant, {}):
            return 'Pos & Mutant AA Match'
        return 'Pos Match (But Not Mutant AA)'

    # Fix: the sodium ion branch previously said 'Pos & AA Mutant Match',
    # a word-order typo inconsistent with the other two site columns.
    return (AA_PROPENSITY.get(mutant, dash),
            HYDROPHOBICITY.get(mutant, dash),
            class_cons.get(mutant, dash),
            fam_cons.get(mutant, dash),
            site_status(rules['ionic_lock_tree']),
            site_status(rules['sodium_ion_tree']),
            site_status(rules['residue_switch_tree']))
def get_data_wt_grouping(rules, wild_type, class_cons, fam_cons):
    '''
    Calculate the Data and Site columns in the browser view for the
    pos & wt analysis mode.
    '''
    dash = u'\u2014'

    def wt_site_status(tree):
        # Each tree is keyed mutant -> wild type -> rule, so every mutant
        # branch must be scanned for the wild type amino acid.  An empty
        # tree (no rule for this class/position) yields a dash.
        if not tree:
            return dash
        known_wild_types = {wt for branch in tree.values() for wt in branch}
        if wild_type in known_wild_types:
            return 'Pos & Wild Type AA Match'
        return 'Pos Match (But Not Wild Type AA)'

    return (AA_PROPENSITY.get(wild_type, dash),
            HYDROPHOBICITY.get(wild_type, dash),
            class_cons.get(wild_type, dash),
            fam_cons.get(wild_type, dash),
            wt_site_status(rules['ionic_lock_tree']),
            wt_site_status(rules['sodium_ion_tree']),
            wt_site_status(rules['residue_switch_tree']))
def get_data_all_grouping(rules, mutant, wild_type, class_cons, fam_cons):
    '''
    Calculate the Data and Site columns in the browser view for the
    pos, mut & wt analysis mode.
    '''
    dash = u'\u2014'

    def difference(mut_val, wt_val):
        # Mutant minus wild type, rendered as 'diff (mut−wt)'.  A missing
        # value is represented by a string placeholder, in which case the
        # difference itself is a dash.  (The original used bitwise `|` on
        # the isinstance results; `or` is the short-circuiting boolean form
        # and is identical for bool operands.)
        if isinstance(mut_val, str) or isinstance(wt_val, str):
            diff = dash
        else:
            diff = str(round(mut_val - wt_val, 2))
        return diff + ' (' + str(mut_val) + u'\u2212' + str(wt_val) + ')'

    # Propensity and hydrophobicity changes; missing amino acids give dashes.
    prop = difference(AA_PROPENSITY.get(mutant, dash), AA_PROPENSITY.get(wild_type, dash))
    hydro = difference(HYDROPHOBICITY.get(mutant, dash), HYDROPHOBICITY.get(wild_type, dash))
    # Conservation changes; missing values default to 0, so a numeric
    # difference is always produced for these two columns.
    rec_cons = difference(fam_cons.get(mutant, 0), fam_cons.get(wild_type, 0))
    prot_cons = difference(class_cons.get(mutant, 0), class_cons.get(wild_type, 0))
    # Site info from the structural site rules (mutant -> wild type -> rule).
    ionic_lock = rules['ionic_lock_tree'].get(mutant, {}).get(wild_type, dash)
    sodium_ion = rules['sodium_ion_tree'].get(mutant, {}).get(wild_type, dash)
    residue_switch = rules['residue_switch_tree'].get(mutant, {}).get(wild_type, dash)
    return (prop,
            hydro,
            prot_cons,
            rec_cons,
            ionic_lock,
            sodium_ion,
            residue_switch)
def parse_rule_definition(rule_def, rule_result):
    '''
    Parse a structural-rule definition into a (site, result) pair suitable
    both for display and for use in the rule dictionaries.

    Args:
        rule_def: the 'Design Principle' text, expected to start with
            'Ionic', 'Sodium' or 'State'.
        rule_result: the 'Addition / Removal' text, expected to end with
            'Removal' or 'Contraction'; anything else is treated as an
            addition.

    Returns:
        site: 'ionic_lock', 'sodium_ion', 'residue_switch', or 'other' when
            the definition cannot be understood.
        result: 'Removed', 'Contracted' or 'Added'; for an 'other' site the
            raw rule_def is returned instead.
    '''
    # Type of action in the definition (endswith replaces the original
    # fixed-length slice comparisons; equivalent for all inputs).
    if rule_result.endswith('Removal'):
        result = 'Removed'
    elif rule_result.endswith('Contraction'):
        result = 'Contracted'
    else:
        result = 'Added'
    # Site the action applies to.
    if rule_def.startswith('Ionic'):
        site = 'ionic_lock'
    elif rule_def.startswith('Sodium'):
        site = 'sodium_ion'
    elif rule_def.startswith('State'):
        site = 'residue_switch'
    else:
        # No sensible way to understand this rule; return it verbatim.
        site = 'other'
        result = rule_def
    return (site, result)
def create_structural_rule_trees(rule_dictionary):
    '''
    Restructure the structural rules from a list of dictionaries into
    tree-like nested dictionaries (class -> generic position -> mutant AA ->
    wild type AA -> rule text) so they can be easily and quickly searched.
    Each type of site gets its own tree, as it has its own column in the
    browser; this allows for simpler code when querying the rules.
    '''
    structural_rule_trees = {'ionic_lock_tree':{}, 'sodium_ion_tree':{}, 'residue_switch_tree':{}, 'other_tree':{}}
    # Classes covered by the 'All' class designation.
    classes = {'A', 'B', 'C', 'F'}
    # Amino acids covered by the 'X' amino acid designation.
    amino_acids = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S',
                   'T', 'V', 'W', 'Y', 'B', 'Z', 'J']
    # For each tree, initiate the inner class dictionary for each class.
    for tree in structural_rule_trees.values():
        for prot_class in classes:
            tree.setdefault(prot_class, {})
    # Iterate the rule groups in a fixed order.  When several rules land on
    # the same (class, position, mutant, wild type) leaf their texts are
    # concatenated, so iterating a set literal here (as the original did)
    # made the output order nondeterministic between runs.
    for item in ('A', 'B', 'C', 'All'):
        for rule in rule_dictionary[item]:
            # Tree to which the rule pertains.
            site, definition = parse_rule_definition(rule['Design Principle'], rule["Addition / Removal"])
            tree = structural_rule_trees[site + "_tree"]
            # Classes and wild type amino acids the rule affects.
            rule_class = classes if rule['Class'] == 'All' else {rule['Class']}
            rule_wt = amino_acids if rule['Wt AA'] == 'X' else rule['Wt AA'].split('/')
            # Add a 'branch' to the nested dictionary for every affected class.
            for prot_class in rule_class:
                node = tree.setdefault(prot_class, {})\
                           .setdefault(rule['Generic Position'], {})\
                           .setdefault(rule['Mut AA'], {})
                for acid in rule_wt:
                    # Append to a previously stored definition, or create a
                    # new entry for the first rule at this leaf.
                    previous = node.get(acid, "")
                    if previous == "":
                        node[acid] = definition
                    else:
                        node[acid] = previous + ", " + definition
    return structural_rule_trees
def fetch_all_pdb(request):
    '''Re-import construct annotations for every non-refined structure and
    render the list of PDB codes that failed to import.'''
    structures = Structure.objects.filter(refined=False)
    # Initialised outside the loop: the original reset this list on every
    # iteration (losing earlier failures) and left it undefined when no
    # structures matched, crashing at the context line below.
    failed = []
    for s in structures:
        pdbname = str(s)
        try:
            protein = Protein.objects.filter(entry_name=pdbname.lower()).get()
            d = fetch_pdb_info(pdbname, protein)
            # Delete any existing construct before adding the new one.
            Construct.objects.filter(name=d['construct_crystal']['pdb_name']).delete()
            add_construct(d)
        except Exception:
            # Best-effort batch import: record the failure and continue.
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate.
            failed.append(pdbname)
    context = {'failed': failed}
    return render(request, 'pdb_all.html', context)
def fetch_pdb(request, slug):
    '''Fetch and preview the PDB annotation info for a single entry.

    Unlike fetch_all_pdb, this view only displays the fetched data: the
    construct creation and cache invalidation steps are currently disabled
    (kept below as commented-out code).
    '''
    protein = Protein.objects.filter(entry_name=slug.lower()).get()
    d = fetch_pdb_info(slug,protein)
    #delete before adding new
    print(d['construct_crystal']['pdb_name'])
    #Construct.objects.filter(name__iexact=d['construct_crystal']['pdb_name']).delete()
    #add_construct(d)
    #cache.delete(d['construct_crystal']['pdb_name']+'_schematics')
    #cache.delete(d['construct_crystal']['pdb_name']+'_snake')
    context = {'d':d}
    return render(request,'pdb_fetch.html',context)
def fetch_pdb_for_webform(request, slug, **response_kwargs):
    '''Return the disordered PDB annotation for *slug* as a JSON response.'''
    slug = slug.lower()
    protein = Protein.objects.filter(entry_name=slug).get()
    annotation = fetch_pdb_info(slug, protein)
    annotation = convert_ordered_to_disordered_annotation(annotation)
    response_kwargs['content_type'] = 'application/json'
    return HttpResponse(json.dumps(annotation), **response_kwargs)
class ConstructBrowser(TemplateView):
    """
    Fetches construct data for the construct browser.
    """
    template_name = "construct_browser.html"

    def get_context_data(self, **kwargs):
        """Add all constructs, with their wild type and construct schematics
        pre-rendered, to the template context."""
        context = super(ConstructBrowser, self).get_context_data(**kwargs)
        try:
            cons = Construct.objects.all().prefetch_related(
                "crystal", "mutations", "purification", "protein__family__parent__parent__parent",
                "insertions__insert_type", "modifications", "deletions", "crystallization__chemical_lists",
                "protein__species", "structure__pdb_code", "structure__publication__web_link", "contributor")
            context['constructs'] = []
            for c in cons:
                # Replace the bound methods with their rendered results so the
                # template can read them as plain attributes.
                c.wt_schematic = c.wt_schematic()
                c.cons_schematic = c.cons_schematic()
                context['constructs'].append(c)
        except Construct.DoesNotExist:
            # No constructs means an empty browser rather than an error page.
            # (Dropped the unused `as e` alias from the original.)
            pass
        return context
class ExperimentBrowser(TemplateView):
    """
    Fetches construct data for the experimental data browser.
    """
    template_name = "experimental_browser.html"

    def get_context_data(self, **kwargs):
        """Add all constructs, annotated with purification / solubilization
        step counts and a pre-computed chemical summary, to the context."""
        context = super(ExperimentBrowser, self).get_context_data(**kwargs)
        try:
            cons = Construct.objects.all().prefetch_related(
                "crystal", "mutations", "purification", "protein__family__parent__parent__parent",
                "insertions__insert_type", "expression", "solubilization", "modifications", "deletions",
                "crystallization__crystal_method", "crystallization__crystal_type",
                "crystallization__chemical_lists", "crystallization__chemical_lists__chemicals__chemical__chemical_type",
                "protein__species", "structure__pdb_code", "structure__publication__web_link", "contributor")\
                .annotate(pur_count=Count('purification__steps'))\
                .annotate(sub_count=Count('solubilization__chemical_list__chemicals'))
            context['constructs'] = []
            context['schematics'] = []
            for c in cons:
                # Pre-compute the chemical summary for the template.
                c.summary = c.chem_summary()
                context['constructs'].append(c)
        except Construct.DoesNotExist:
            # No constructs means an empty browser rather than an error page.
            # (Dropped the unused `as e` alias from the original.)
            pass
        return context
class design(AbsTargetSelection):
    """Target-selection page for the construct design tool.

    Configures the shared AbsTargetSelection view as a single-step receptor
    selection whose result is submitted to /construct/tool/.
    """
    # Left panel
    step = 1
    number_of_steps = 1
    # docs = 'generic_numbering.html' # FIXME
    # description = 'Select receptors to index by searching or browsing in the middle column. You can select entire' \
    # + ' receptor families and/or individual receptors.\n\nSelected receptors will appear in the right column,' \
    # + ' where you can edit the list.\n\nSelect which numbering schemes to use in the middle column.\n\nOnce you' \
    # + ' have selected all your receptors, click the green button.'
    description = '''This is a tool to design structure constructs based on all published GPCR structures.
    A modification can be based on a closest template, most frequent solution or structural rationale (mutations)'''
    # Middle section
    numbering_schemes = False
    filters = False
    search = True
    title = "Select a receptor"
    template_name = 'designselection.html'
    # Selection configuration: targets only, single receptor.
    type_of_selection = 'targets'
    selection_only_receptors = True
    selection_boxes = OrderedDict([
        ('reference', False),
        ('targets', False),
        ('segments', False),
    ])
    # Buttons
    buttons = {
        'continue': {
            'label': 'Show results',
            'onclick': 'submitupload()',
            'color': 'success',
            'url': '/construct/tool/',
            #'url': 'calculate/'
        }
    }
    redirect_on_select = True
    selection_heading = "Construct Design Tool"

    def get_context_data(self, **kwargs):
        # No extra context needed beyond the base selection view.
        context = super().get_context_data(**kwargs)
        return context
@csrf_exempt #jquery send post, so no csrf
def align(request):
    '''Build an HTML sequence alignment of the selected constructs.

    Expects a POSTed JSON list of ids: 'align_<segment>' entries select the
    protein segments to align, all other entries are Construct primary keys.
    Residues are aligned by generic number; residues without a generic number
    are tracked as unaligned 'before'/'after' runs per segment.  Returns the
    rendered align.html template with pre-built HTML table rows.
    '''
    ids = json.loads(request.POST.get('ids'))
    # Split the posted ids into segment slugs and construct primary keys.
    c_ids = []
    s_ids = []
    for i in ids:
        if i.startswith('align'):
            s_ids.append(i.split('_')[1])
        else:
            c_ids.append(i)
    cons = Construct.objects.filter(pk__in=c_ids).prefetch_related(
        "crystal","mutations","purification","protein__family__parent__parent__parent", "insertions", "modifications", "deletions", "crystallization__chemical_lists",
        "protein__species","structure__pdb_code","structure__publication__web_link", "contributor")
    # Collect the proteins, construct -> entry name mapping, and the per-pos
    # schematic annotations used for residue tooltips below.
    proteins = []
    constructs = OrderedDict()
    annotations = {}
    for c in cons:
        # print(c)
        proteins.append(c.protein)
        constructs[c.name] = c.protein.entry_name
        annotations[c.name] = c.schematic()['annotations']
    print(annotations)
    # Fetch residues for the requested segments, or all segments when none
    # were selected.
    if len(s_ids):
        rs = Residue.objects.filter(protein_conformation__protein__in=proteins, protein_segment__slug__in=s_ids).prefetch_related(
            'protein_conformation__protein', 'protein_conformation__state', 'protein_segment',
            'generic_number__scheme', 'display_generic_number__scheme')
    else:
        s_ids = ['N-term','TM1','ICL1','TM2','ECL1','TM3','ICL2','TM4','ECL2','TM5','ICL3','TM6','ECL3','TM7','ICL4','H8','C-term']
        rs = Residue.objects.filter(protein_conformation__protein__in=proteins).prefetch_related(
            'protein_conformation__protein', 'protein_conformation__state', 'protein_segment',
            'generic_number__scheme', 'display_generic_number__scheme')
    print("residues",len(rs))
    # Per-segment generic-number overview and column-capacity bookkeeping.
    distinct_gn = []
    ordered_gn = OrderedDict()
    distinct_segments = []
    overview = OrderedDict()
    segment_length = OrderedDict()
    for s in s_ids:
        overview[s] = OrderedDict()
        segment_length[s] = {'aligned':0, 'before':0,'after':0,'total':0}
    protein_lookup = {}
    print('build stuff')
    segment = ''
    protein = ''
    track_unaligned = {}
    #Find all possible generic numbers, to ensure gaps
    for r in rs.order_by('protein_conformation__id','sequence_number'):
        # Reset per-(protein, segment) state when either changes.
        if segment!=r.protein_segment.slug or protein!=r.protein_conformation.protein.entry_name:
            no_encountered_gn = True
            length = 0
            length_before = 0
            length_after = 0
            segment = r.protein_segment.slug
            protein = r.protein_conformation.protein.entry_name
            if protein not in protein_lookup:
                protein_lookup[protein] = {}
                track_unaligned[protein] = {}
            if segment not in track_unaligned[protein]:
                track_unaligned[protein][segment] = {'before':[],'after':[]}
            if segment not in distinct_segments:
                distinct_segments.append(segment)
                overview[segment] = OrderedDict()
        if r.generic_number:
            # Aligned residue: record it under its generic number and make
            # sure the overview has a placeholder column for that number.
            no_encountered_gn = False
            gn = r.generic_number.label
            protein_lookup[protein][gn] = {'aa':r.amino_acid,'pos':r.sequence_number}
            gn_sort = gn.split('x')[1]
            gn_sort = float("0."+gn_sort)
            if gn not in distinct_gn:
                distinct_gn.append(gn)
                overview[segment][gn_sort] = [gn,{'aa':'-','pos':''}]
            length += 1
        else:
            # Unaligned residue: 'before' until the first generic number of
            # the segment has been seen, 'after' from then on.
            if no_encountered_gn:
                track_unaligned[protein][segment]['before'].append({'aa':r.amino_acid,'pos':r.sequence_number})
                length_before += 1
            else:
                track_unaligned[protein][segment]['after'].append({'aa':r.amino_acid,'pos':r.sequence_number})
                length_after += 1
        # Track the widest aligned/before/after runs seen for the segment so
        # every row can be padded to the same number of table cells.
        if len(overview[segment])>segment_length[segment]['aligned']:
            segment_length[segment]['aligned'] = len(overview[segment])
        if length_before>segment_length[segment]['before']:
            segment_length[segment]['before'] = length_before
        if length_after>segment_length[segment]['after']:
            segment_length[segment]['after'] = length_after
        if segment_length[segment]['aligned']+segment_length[segment]['before']+segment_length[segment]['after']>segment_length[segment]['total']:
            segment_length[segment]['total'] = segment_length[segment]['aligned']+segment_length[segment]['before']+segment_length[segment]['after']
    # SORT generic residues to ensure correct order
    gn_list = ""
    ordered_summary = OrderedDict()
    for seg,gns in overview.items():
        ordered_summary[seg] = OrderedDict()
        #GN LIST
        gn_list += """<td class="ali-td ali-residue res-color-X"> </td>"""
        # Before-gap header cells precede the generic numbers except for the
        # C-terminus, where they follow them.
        if seg!='C-term':
            for _ in range(segment_length[seg]['before']):
                gn_list += """<td class="ali-td"> </td>"""
        for gn in sorted(gns):
            ordered_summary[seg][gns[gn][0]] = {'aa':'-','pos':''}
            gn_list += """<td class="ali-td-generic-num">{}</td>""".format("x"+gns[gn][0].split("x")[1])
        if seg=='C-term':
            for _ in range(segment_length[seg]['before']):
                gn_list += """<td class="ali-td"> </td>"""
        for _ in range(segment_length[seg]['after']):
            gn_list += """<td class="ali-td"> </td>"""
    # Build one <tr> of residue cells per construct.
    alignment = OrderedDict()
    alignment_print_sequence = ""
    for c,p in constructs.items():
        alignment[c] = copy.deepcopy(ordered_summary)
        alignment_print_sequence += '<tr>'
        for seg,gns in alignment[c].items():
            # Ensure lookup structures exist for proteins/segments that had
            # no residues in this segment.
            if p not in track_unaligned:
                track_unaligned[p] = {seg: {'before':[],'after':[]}}
            if p not in protein_lookup:
                protein_lookup[p] = {}
            if seg not in track_unaligned[p]:
                track_unaligned[p][seg] = {'before':[],'after':[]}
            alignment_print_sequence += """<td class="ali-td ali-residue res-color-_"> </td>"""
            # Pad the 'before' run up to the segment-wide capacity.
            if seg!='C-term':
                for _ in range(segment_length[seg]['before']-len(track_unaligned[p][seg]['before'])):
                    alignment_print_sequence += """<td class="ali-td ali-residue res-color-">
                    -</td>"""
            # Unaligned residues preceding the first generic number.
            for aa in track_unaligned[p][seg]['before']:
                if aa['pos'] in annotations[c]:
                    annotation = annotations[c][aa['pos']][0]
                    annotation_text = "<br>"+annotations[c][aa['pos']][1]
                else:
                    annotation = ''
                    annotation_text = ''
                alignment_print_sequence += """<td class="ali-td ali-residue res-color-{}">
                <div data-toggle="tooltip" data-placement="top" data-html="true"
                title="{}{}{}">{}</div></td>""".format(annotation,aa['aa'],aa['pos'],annotation_text,aa['aa'])
            # Aligned residues: fill the generic-number columns, '-' when the
            # construct has no residue at that number.
            for gn, aa in gns.items():
                if gn in protein_lookup[p]:
                    aa = protein_lookup[p][gn]
                    alignment[c][seg][gn] = aa
                if aa['pos'] in annotations[c]:
                    annotation = annotations[c][aa['pos']][0]
                    annotation_text = "<br>"+annotations[c][aa['pos']][1]
                else:
                    annotation = ''
                    annotation_text = ''
                alignment_print_sequence += """<td class="ali-td ali-residue res-color-{}">
                <div data-toggle="tooltip" data-placement="top" data-html="true"
                title="{}{}<br>SCHEME: {}{}">{}</div></td>""".format(annotation,aa['aa'],aa['pos'],gn,annotation_text,aa['aa'])
            # Unaligned residues following the last generic number, padded to
            # the segment-wide 'after' capacity.
            for aa in track_unaligned[p][seg]['after']:
                if aa['pos'] in annotations[c]:
                    annotation = annotations[c][aa['pos']][0]
                    annotation_text = "<br>"+annotations[c][aa['pos']][1]
                else:
                    annotation = ''
                    annotation_text = ''
                alignment_print_sequence += """<td class="ali-td ali-residue res-color-{}">
                <div data-toggle="tooltip" data-placement="top" data-html="true"
                title="{}{}{}">{}</div></td>""".format(annotation,aa['aa'],aa['pos'],annotation_text,aa['aa'])
            for _ in range(segment_length[seg]['after']-len(track_unaligned[p][seg]['after'])):
                alignment_print_sequence += """<td class="ali-td ali-residue res-color-">
                -</td>"""
            # For the C-terminus the 'before' padding goes at the end,
            # mirroring the header cell order built above.
            if seg=='C-term':
                for _ in range(segment_length[seg]['before']-len(track_unaligned[p][seg]['before'])):
                    alignment_print_sequence += """<td class="ali-td ali-residue res-color-">
                    -</td>"""
        alignment_print_sequence += '</tr>'
    print('done',len(alignment_print_sequence))
    context = {'constructs': constructs,'alignment_print_sequence': alignment_print_sequence, 'segment_length' : segment_length, 'gn_list' : gn_list, 'segments': s_ids, 'c_ids': json.dumps(c_ids)} #, 'alignment_print_sequence': alignment_print_sequence
    return render(request,'align.html',context)
|
{
"content_hash": "0e38c221ed5068ce9a7b70ae9d602c9b",
"timestamp": "",
"source": "github",
"line_count": 2351,
"max_line_length": 379,
"avg_line_length": 51.393024245002124,
"alnum_prop": 0.5432898820608317,
"repo_name": "fosfataza/protwis",
"id": "094c222543c805cfbf845fac2cf288ffc90c0ae8",
"size": "120825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "construct/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "104739"
},
{
"name": "HTML",
"bytes": "1426027"
},
{
"name": "JavaScript",
"bytes": "1127392"
},
{
"name": "Python",
"bytes": "2593740"
},
{
"name": "Shell",
"bytes": "386"
}
],
"symlink_target": ""
}
|
"""
This module provides tools for deblending overlapping sources labeled in
a segmentation image.
"""
import warnings
from multiprocessing import cpu_count, get_context
import numpy as np
from astropy.units import Quantity
from astropy.utils.decorators import deprecated_renamed_argument
from astropy.utils.exceptions import AstropyUserWarning
from photutils.segmentation.core import SegmentationImage
from photutils.segmentation.detect import _detect_sources
from photutils.segmentation.utils import _make_binary_structure
from photutils.utils._convolution import _filter_data
from photutils.utils._optional_deps import HAS_TQDM
__all__ = ['deblend_sources']
# Deprecation shim: ``kernel`` is still accepted but warns; callers should
# pass an already-convolved image through ``data`` instead.
@deprecated_renamed_argument('kernel', None, '1.5', message='"kernel" was '
                             'deprecated in version 1.5 and will be removed '
                             'in a future version. Instead, if filtering is '
                             'desired, please input a convolved image '
                             'directly into the "data" parameter.')
def deblend_sources(data, segment_img, npixels, kernel=None, labels=None,
                    nlevels=32, contrast=0.001, mode='exponential',
                    connectivity=8, relabel=True, nproc=1, progress_bar=True):
    """
    Deblend overlapping sources labeled in a segmentation image.
    Sources are deblended using a combination of multi-thresholding and
    `watershed segmentation
    <https://en.wikipedia.org/wiki/Watershed_(image_processing)>`_. In
    order to deblend sources, there must be a saddle between them.
    Parameters
    ----------
    data : 2D `~numpy.ndarray`
        The 2D data array. This array should be the same array used in
        `~photutils.segmentation.detect_sources`.
        .. note::
            It is strongly recommended that the user convolve the data
            with ``kernel`` and input the convolved data directly
            into the ``data`` parameter. In this case do not input a
            ``kernel``, otherwise the data will be convolved twice.
    segment_img : `~photutils.segmentation.SegmentationImage`
        The segmentation image to deblend.
    npixels : int
        The minimum number of connected pixels, each greater than
        ``threshold``, that an object must have to be deblended.
        ``npixels`` must be a positive integer.
    kernel : 2D `~numpy.ndarray` or `~astropy.convolution.Kernel2D`, optional
        Deprecated. If filtering is desired, please input a convolved
        image directly into the ``data`` parameter.
        The 2D kernel used to filter the image before thresholding.
        Filtering the image will smooth the noise and maximize
        detectability of objects with a shape similar to the kernel.
        ``kernel`` must be `None` if the input ``data`` are already
        convolved.
    labels : int or array-like of int, optional
        The label numbers to deblend. If `None` (default), then all
        labels in the segmentation image will be deblended.
    nlevels : int, optional
        The number of multi-thresholding levels to use for deblending.
        Each source will be re-thresholded at ``nlevels`` levels spaced
        between its minimum and maximum values (non-inclusive). The
        ``mode`` keyword determines how the levels are spaced.
    contrast : float, optional
        The fraction of the total source flux that a local peak must
        have (at any one of the multi-thresholds) to be deblended
        as a separate object. ``contrast`` must be between 0 and 1,
        inclusive. If ``contrast=0`` then every local peak will be made
        a separate object (maximum deblending). If ``contrast=1`` then
        no deblending will occur. The default is 0.001, which will
        deblend sources with a 7.5 magnitude difference.
    mode : {'exponential', 'linear', 'sinh'}, optional
        The mode used in defining the spacing between the
        multi-thresholding levels (see the ``nlevels`` keyword) during
        deblending. The ``'exponential'`` and ``'sinh'`` modes have
        more threshold levels near the source minimum and less near
        the source maximum. The ``'linear'`` mode evenly spaces the
        threshold levels between the source minimum and maximum.
        The ``'exponential'`` and ``'sinh'`` modes differ in that
        the ``'exponential'`` levels are dependent on the source
        maximum/minimum ratio (smaller ratios are more linear; larger
        ratios are more exponential), while the ``'sinh'`` levels
        are not. Also, the ``'exponential'`` mode will be changed to
        ``'linear'`` for sources with non-positive minimum data values.
    connectivity : {8, 4}, optional
        The type of pixel connectivity used in determining how pixels
        are grouped into a detected source. The options are 8 (default)
        or 4. 8-connected pixels touch along their edges or corners.
        4-connected pixels touch along their edges. The ``connectivity``
        must be the same as that used to create the input segmentation
        image.
    relabel : bool, optional
        If `True` (default), then the segmentation image will be
        relabeled such that the labels are in consecutive order starting
        from 1.
    nproc : int, optional
        The number of processes to use for multiprocessing (if larger
        than 1). If set to 1, then a serial implementation is used
        instead of a parallel one. If `None`, then the number of
        processes will be set to the number of CPUs detected on the
        machine. Please note that due to overheads, multiprocessing may
        be slower than serial processing. This is especially true if one
        only has a small number of sources to deblend. The benefits of
        multiprocessing require ~1000 or more sources to deblend, with
        larger gains as the number of sources increase.
    progress_bar : bool, optional
        Whether to display a progress bar. Note that if multiprocessing
        is used (``nproc > 1``), the estimation times (e.g., time per
        iteration and time remaining, etc) may be unreliable. The
        progress bar requires that the `tqdm <https://tqdm.github.io/>`_
        optional dependency be installed. Note that the progress
        bar does not currently work in the Jupyter console due to
        limitations in ``tqdm``.
    Returns
    -------
    segment_image : `~photutils.segmentation.SegmentationImage`
        A segmentation image, with the same shape as ``data``, where
        sources are marked by different positive integer values. A
        value of zero is reserved for the background.
    See Also
    --------
    :func:`photutils.segmentation.detect_sources`
    :class:`photutils.segmentation.SourceFinder`
    """
    # Work on the bare array; units play no role in thresholding.
    if isinstance(data, Quantity):
        data = data.value
    # ---- input validation ----
    if not isinstance(segment_img, SegmentationImage):
        raise ValueError('segment_img must be a SegmentationImage')
    if segment_img.shape != data.shape:
        raise ValueError('The data and segmentation image must have '
                         'the same shape')
    if nlevels < 1:
        raise ValueError('nlevels must be >= 1')
    if contrast < 0 or contrast > 1:
        raise ValueError('contrast must be >= 0 and <= 1')
    if mode not in ('exponential', 'linear', 'sinh'):
        raise ValueError('mode must be "exponential", "linear", or "sinh"')
    if labels is None:
        labels = segment_img.labels
    else:
        labels = np.atleast_1d(labels)
        segment_img.check_labels(labels)
    # include only sources that have at least (2 * npixels);
    # this is required for it to be deblended into multiple sources,
    # each with a minimum of npixels
    mask = (segment_img.areas[segment_img.get_indices(labels)]
            >= (npixels * 2))
    labels = labels[mask]
    # connectivity footprint shared by every per-source deblend
    footprint = _make_binary_structure(data.ndim, connectivity)
    if kernel is not None:
        # deprecated path: smooth the data with the user-supplied kernel
        data = _filter_data(data, kernel, mode='constant',
                            fill_value=0.0) # pragma: no cover
    if nproc is None:
        nproc = cpu_count() # pragma: no cover
    if progress_bar and HAS_TQDM:
        from tqdm.auto import tqdm # pragma: no cover
    # Build the output image via object.__new__ (skips __init__, which
    # presumably validates/copies -- avoids redundant work here).
    segm_deblended = object.__new__(SegmentationImage)
    segm_deblended._data = np.copy(segment_img.data)
    # New child labels are appended after the current maximum so they
    # never collide with existing labels.
    last_label = segment_img.max_label
    indices = segment_img.get_indices(labels)
    # Collect per-source cutouts so each source can be deblended
    # independently (serially or in worker processes).
    all_source_data = []
    all_source_segments = []
    all_source_slices = []
    for label, idx in zip(labels, indices):
        source_slice = segment_img.slices[idx]
        source_data = data[source_slice]
        source_segment = object.__new__(SegmentationImage)
        source_segment._data = segment_img.data[source_slice]
        source_segment.keep_labels(label)  # include only one label
        all_source_data.append(source_data)
        all_source_segments.append(source_segment)
        all_source_slices.append(source_slice)
    if nproc == 1:
        # serial path
        if progress_bar and HAS_TQDM:
            all_source_data = tqdm(all_source_data,
                                   desc='Deblending') # pragma: no cover
        all_source_deblends = []
        for source_data, source_segment in zip(all_source_data,
                                               all_source_segments):
            deblender = _Deblender(source_data, source_segment, npixels,
                                   footprint, nlevels, contrast, mode)
            source_deblended = deblender.deblend_source()
            all_source_deblends.append(source_deblended)
    else:
        # parallel path: fan one _deblend_source call out per label
        nlabels = len(labels)
        args_all = zip(all_source_data, all_source_segments,
                       (npixels,) * nlabels, (footprint,) * nlabels,
                       (nlevels,) * nlabels, (contrast,) * nlabels,
                       (mode,) * nlabels)
        if progress_bar and HAS_TQDM:
            args_all = tqdm(args_all, total=nlabels,
                            desc='Deblending') # pragma: no cover
        # 'spawn' start method is used explicitly (works on all platforms)
        with get_context('spawn').Pool(processes=nproc) as executor:
            all_source_deblends = executor.starmap(_deblend_source, args_all)
    # Paste the deblended cutouts back into the full image, offsetting
    # child labels past last_label, and gather any mode-change warnings.
    nonposmin_labels = []
    nmarkers_labels = []
    for (label, source_deblended, source_slice) in zip(
            labels, all_source_deblends, all_source_slices):
        if source_deblended is not None:
            # replace the original source with the deblended source
            segment_mask = (source_deblended.data > 0)
            segm_deblended._data[source_slice][segment_mask] = (
                source_deblended.data[segment_mask] + last_label)
            last_label += source_deblended.nlabels
            # record which input labels had their mode downgraded
            if hasattr(source_deblended, 'warnings'):
                if source_deblended.warnings.get('nonposmin',
                                                 None) is not None:
                    nonposmin_labels.append(label)
                if source_deblended.warnings.get('nmarkers',
                                                 None) is not None:
                    nmarkers_labels.append(label)
    # Surface per-source mode downgrades as one warning plus details on
    # the output's ``info`` attribute.
    if nonposmin_labels or nmarkers_labels:
        segm_deblended.info = {'warnings': {}}
        warnings.warn('The deblending mode of one or more source labels from '
                      'the input segmentation image was changed from '
                      f'"{mode}" to "linear". See the "info" attribute '
                      'for the list of affected input labels.',
                      AstropyUserWarning)
        if nonposmin_labels:
            warn = {'message': f'Deblending mode changed from {mode} to '
                    'linear due to non-positive minimum data values.',
                    'input_labels': np.array(nonposmin_labels)}
            segm_deblended.info['warnings']['nonposmin'] = warn
        if nmarkers_labels:
            warn = {'message': f'Deblending mode changed from {mode} to '
                    'linear due to too many potential deblended sources.',
                    'input_labels': np.array(nmarkers_labels)}
            segm_deblended.info['warnings']['nmarkers'] = warn
    if relabel:
        segm_deblended.relabel_consecutive()
    return segm_deblended
def _deblend_source(source_data, source_segment, npixels, footprint, nlevels,
                    contrast, mode):
    """
    Module-level helper that deblends one labeled source.

    Exists so the work can be dispatched through a multiprocessing
    pool (``starmap``), which requires a picklable top-level callable.
    """
    return _Deblender(source_data, source_segment, npixels, footprint,
                      nlevels, contrast, mode).deblend_source()
class _Deblender:
    """
    Class to deblend a single labeled source.
    Parameters
    ----------
    source_data : 2D `~numpy.ndarray`
        The cutout data array for a single source. ``data`` should
        also already be smoothed by the same filter used in
        :func:`~photutils.segmentation.detect_sources`, if applicable.
    source_segment : `~photutils.segmentation.SegmentationImage`
        A cutout `~photutils.segmentation.SegmentationImage` object with
        the same shape as ``data``. ``segment_img`` should contain only
        *one* source label.
    npixels : int
        The number of connected pixels, each greater than ``threshold``,
        that an object must have to be detected. ``npixels`` must be a
        positive integer.
    nlevels : int
        The number of multi-thresholding levels to use. Each source
        will be re-thresholded at ``nlevels`` levels spaced
        exponentially or linearly (see the ``mode`` keyword) between its
        minimum and maximum values within the source segment.
    contrast : float
        The fraction of the total (blended) source flux that a local
        peak must have (at any one of the multi-thresholds) to be
        considered as a separate object. ``contrast`` must be between 0
        and 1, inclusive. If ``contrast = 0`` then every local peak
        will be made a separate object (maximum deblending). If
        ``contrast = 1`` then no deblending will occur. The default is
        0.001, which will deblend sources with a 7.5 magnitude
        difference.
    mode : {'exponential', 'linear', 'sinh'}
        The mode used in defining the spacing between the
        multi-thresholding levels (see the ``nlevels`` keyword). The
        default is 'exponential'.
    Returns
    -------
    segment_image : `~photutils.segmentation.SegmentationImage`
        A segmentation image, with the same shape as ``data``, where
        sources are marked by different positive integer values. A
        value of zero is reserved for the background. Note that the
        returned `SegmentationImage` will have consecutive labels
        starting with 1.
    """
    def __init__(self, source_data, source_segment, npixels, footprint,
                 nlevels, contrast, mode):
        self.source_data = source_data
        self.source_segment = source_segment
        self.npixels = npixels
        self.footprint = footprint
        self.nlevels = nlevels
        self.contrast = contrast
        self.mode = mode
        # populated with 'nonposmin'/'nmarkers' keys if the mode gets
        # downgraded to 'linear' during deblending
        self.warnings = {}
        self.segment_mask = source_segment.data.astype(bool)
        # NaN-safe statistics over only the pixels inside the segment
        self.source_values = source_data[self.segment_mask]
        self.source_min = np.nanmin(self.source_values)
        self.source_max = np.nanmax(self.source_values)
        self.source_sum = np.nansum(self.source_values)
        self.label = source_segment.labels[0]  # should only be 1 label
        # NOTE: this includes the source min/max, but we exclude those
        # later, giving nlevels thresholds between min and max
        # (noninclusive; i.e., nlevels + 1 parts)
        self.linear_thresholds = np.linspace(self.source_min, self.source_max,
                                             self.nlevels + 2)
    def normalized_thresholds(self):
        # Rescale the linear thresholds to the [0, 1] interval.
        return ((self.linear_thresholds - self.source_min)
                / (self.source_max - self.source_min))
    def compute_thresholds(self):
        """
        Compute the multi-level detection thresholds for the source.
        """
        # exponential spacing uses a max/min ratio, so it requires a
        # positive minimum; otherwise fall back to linear spacing
        if self.mode == 'exponential' and self.source_min <= 0:
            self.warnings['nonposmin'] = 'non-positive minimum'
            self.mode = 'linear'
        if self.mode == 'linear':
            thresholds = self.linear_thresholds
        elif self.mode == 'sinh':
            # map normalized levels through sinh, then rescale back to
            # the [min, max] data range
            a = 0.25
            minval = self.source_min
            maxval = self.source_max
            thresholds = self.normalized_thresholds()
            thresholds = np.sinh(thresholds / a) / np.sinh(1.0 / a)
            thresholds *= (maxval - minval)
            thresholds += minval
        elif self.mode == 'exponential':
            # geometric spacing between min and max
            minval = self.source_min
            maxval = self.source_max
            thresholds = self.normalized_thresholds()
            thresholds = minval * (maxval / minval) ** thresholds
        return thresholds[1:-1]  # do not include source min and max
    def multithreshold(self):
        """
        Perform multithreshold detection for each source.
        """
        thresholds = self.compute_thresholds()
        # suppress RuntimeWarnings raised during the repeated detections
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=RuntimeWarning)
            segments = _detect_sources(self.source_data, thresholds,
                                       self.npixels, self.footprint,
                                       self.segment_mask, deblend_mode=True)
        return segments
    def make_markers(self, segments):
        """
        Make markers (possible sources) for the watershed algorithm.
        Parameters
        ----------
        segments : list of `~photutils.segmentation.SegmentationImage`
            A list of segmentation images, one for each threshold.
        Returns
        -------
        markers : list of `~photutils.segmentation.SegmentationImage`
            A list of segmentation images that contain possible sources
            as markers. The last list element contains all of the
            potential source markers.
        """
        from scipy.ndimage import label as ndi_label
        for i in range(len(segments) - 1):
            segm_lower = segments[i].data
            segm_upper = segments[i + 1].data
            markers = segm_lower.astype(bool)
            relabel = False
            # if there are more sources at the upper level, then
            # remove the parent source(s) from the lower level,
            # but keep any sources in the lower level that do not have
            # multiple children in the upper level
            for label in segments[i].labels:
                mask = (segm_lower == label)
                # find label mapping from the lower to upper level
                upper_labels = segm_upper[mask]
                upper_labels = np.unique(upper_labels[upper_labels != 0])
                if upper_labels.size >= 2:
                    # the source splits at the next threshold: replace
                    # the parent footprint with its children
                    relabel = True
                    markers[mask] = segm_upper[mask].astype(bool)
            if relabel:
                segm_data, nlabels = ndi_label(markers,
                                               structure=self.footprint)
                # build the image without running __init__ (validation)
                segm_new = object.__new__(SegmentationImage)
                segm_new._data = segm_data
                segm_new.__dict__['labels'] = np.arange(nlabels) + 1
                segments[i + 1] = segm_new
            else:
                segments[i + 1] = segments[i]
        return segments
    def apply_watershed(self, markers):
        """
        Apply the watershed algorithm to the source markers.
        Parameters
        ----------
        markers : list of `~photutils.segmentation.SegmentationImage`
            A list of segmentation images that contain possible sources
            as markers. The last list element contains all of the
            potential source markers.
        Returns
        -------
        segment_data : 2D int `~numpy.ndarray`
            A 2D int array containing the deblended source labels. Note
            that the source labels may not be consecutive.
        """
        from scipy.ndimage import sum_labels
        from skimage.segmentation import watershed
        # all markers are at the top level
        markers = markers[-1].data
        # Deblend using watershed. If any source does not meet the contrast
        # criterion, then remove the faintest such source and repeat until
        # all sources meet the contrast criterion.
        remove_marker = True
        while remove_marker:
            # the data are negated so flux peaks become watershed basins
            markers = watershed(-self.source_data, markers,
                                mask=self.segment_mask,
                                connectivity=self.footprint)
            labels = np.unique(markers[markers != 0])
            # fraction of the total source flux in each child source
            flux_frac = (sum_labels(self.source_data, markers, index=labels)
                         / self.source_sum)
            remove_marker = any(flux_frac < self.contrast)
            if remove_marker:
                # remove only the faintest source (one at a time) because
                # several faint sources could combine to meet the contrast
                # criterion
                markers[markers == labels[np.argmin(flux_frac)]] = 0.
        return markers
    def deblend_source(self):
        """
        Deblend a single labeled source.
        """
        if self.source_min == self.source_max:  # no deblending
            return None
        segments = self.multithreshold()
        if len(segments) == 0:  # no deblending
            return None
        # define the markers (possible sources) for the watershed algorithm
        markers = self.make_markers(segments)
        # If there are too many markers (e.g., due to low threshold
        # and/or small npixels), the watershed step can be very slow
        # (the threshold of 200 is arbitrary, but seems to work well).
        # This mostly affects the "exponential" mode, where there are
        # many levels at low thresholds, so here we try again with
        # "linear" mode.
        if self.mode != 'linear' and markers[-1].nlabels > 200:
            self.warnings['nmarkers'] = 'too many markers'
            self.mode = 'linear'
            segments = self.multithreshold()
            if len(segments) == 0:  # no deblending
                return None
            markers = self.make_markers(segments)
        # deblend using the watershed algorithm using the markers as seeds
        markers = self.apply_watershed(markers)
        # watershed must exactly tile the original segment footprint;
        # a mismatch indicates inconsistent connectivity settings
        if not np.array_equal(self.segment_mask, markers.astype(bool)):
            raise ValueError(f'Deblending failed for source "{self.label}". '
                             'Please ensure you used the same pixel '
                             'connectivity in detect_sources and '
                             'deblend_sources.')
        labels = np.unique(markers[markers != 0])
        if len(labels) == 1:  # no deblending
            return None
        segm_new = object.__new__(SegmentationImage)
        segm_new._data = markers
        segm_new.__dict__['labels'] = labels
        segm_new.relabel_consecutive(start_label=1)
        # propagate any mode-downgrade warnings to the caller
        if self.warnings:
            segm_new.warnings = self.warnings
        return segm_new
|
{
"content_hash": "4284add07dd5a2643f29ea6c669e1fff",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 78,
"avg_line_length": 42.15760869565217,
"alnum_prop": 0.6126079669975506,
"repo_name": "astropy/photutils",
"id": "f9efd47002d2f3b879320939fc659fd0f2318d0f",
"size": "23335",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "photutils/segmentation/deblend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "32106"
},
{
"name": "Python",
"bytes": "1447767"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh._testing.util.api import verify_all
# Module under test
#import bokeh.sampledata.les_mis as bsl
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Names that bokeh.sampledata.les_mis is expected to export.
ALL = (
    'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
# verify_all builds a test asserting the module's public API matches ALL;
# the sampledata mark gates it behind the sample-data test selection.
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.les_mis", ALL))
@pytest.mark.sampledata
def test_data():
    """Check the les_mis sample data loads as a dict with the expected keys."""
    import bokeh.sampledata.les_mis as bsl
    data = bsl.data
    assert isinstance(data, dict)
    # the packaged dataset must expose exactly these two top-level keys
    assert set(data.keys()) == set(['links', 'nodes'])
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
|
{
"content_hash": "dafed40b8257f8a0a9029a3dd7241d66",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 82,
"avg_line_length": 30.375,
"alnum_prop": 0.3230452674897119,
"repo_name": "jakirkham/bokeh",
"id": "1f9975f405aede474ad481e149e0f534e3bf4fdc",
"size": "1978",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bokeh/sampledata/tests/test_les_mis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "102287"
},
{
"name": "CoffeeScript",
"bytes": "413132"
},
{
"name": "Dockerfile",
"bytes": "4099"
},
{
"name": "HTML",
"bytes": "47532"
},
{
"name": "JavaScript",
"bytes": "25172"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "PowerShell",
"bytes": "691"
},
{
"name": "Python",
"bytes": "3332368"
},
{
"name": "Shell",
"bytes": "9209"
},
{
"name": "TypeScript",
"bytes": "1634848"
}
],
"symlink_target": ""
}
|
import sys,ctypes
from twisted.python import filepath
from pyglet import gl
from pyglet import clock
def glvec(*args):
    """Pack the given numbers into a ctypes array of GLfloat."""
    array_type = gl.GLfloat * len(args)
    return array_type(*args)
## Debugging stuff
def show_caller(level):
    """Print the source file, function name and line number of a caller.

    ``level`` counts frames above the caller of this function: 0 reports
    the direct caller, 1 its caller, and so on.
    """
    # BUGFIX: os was never imported at module level (only sys and ctypes),
    # so os.path.sep below raised NameError; import it locally.
    import os
    l = level + 1  # +1 skips show_caller's own frame
    frame = sys._getframe(l)
    fname = frame.f_code.co_filename.split(os.path.sep)[-1]
    caller = frame.f_code.co_name
    lineno = frame.f_lineno
    # Single pre-formatted argument keeps this valid on both Python 2
    # (parenthesized expression) and Python 3 (function call).
    print('%s %s %d' % (fname, caller, lineno))
showCaller = show_caller
# Map of logical font style names to (bundled .ttf filename, face name).
_font_styles = {
    'mono' : ('VeraMono.ttf', 'Bitstream Vera Sans Mono'),
}
# Whether the bundled font file has already been registered with pyglet.
_defaultLoaded = False
def default_font(size=10, style='mono'):
    """Load the bundled default font, registering it with pyglet once.

    Unknown ``style`` names fall back to the 'mono' style.
    """
    global _defaultLoaded
    from pyglet import font
    filename, face = _font_styles.get(style, _font_styles['mono'])
    if not _defaultLoaded:
        # register the bundled .ttf file exactly once per process
        data_dir = filepath.FilePath(__file__).parent().child('data')
        font.add_file(data_dir.child(filename).path)
        _defaultLoaded = True
    return font.load(face, size)
defaultFont = default_font
def add_fps_display(context=None):
    """Attach a frames-per-second ClockDisplay to the context's OSD.

    Falls back to the global miru context when none is given.
    """
    if not context:
        from miru.context import context
    fps_font = defaultFont(14)
    display = clock.ClockDisplay(font=fps_font, color=(0., 1., 0., 0.5))
    context.osd.add_object(display)
addFpsDisplay = add_fps_display
def flatten(l, _cycle_checks=None):
    """Flatten a list of (arbitrarily nested) lists into one flat list.

    Raises ValueError if the structure contains a cycle (a list that
    is, directly or indirectly, an element of itself).

    ``_cycle_checks`` is internal: the chain of ancestor lists on the
    current recursion path.
    """
    # BUGFIX: the original kept every visited list forever and compared
    # with ``in`` (equality), so repeated or merely *equal* sublists
    # (e.g. flatten([a, a]) or flatten([[1], [1]])) were falsely
    # reported as cycles. A true cycle means the current list is its
    # own ancestor, checked by identity along the recursion path.
    ancestors = [] if _cycle_checks is None else _cycle_checks
    if any(l is seen for seen in ancestors):
        # py2/py3-compatible raise form (the old ``raise E, msg`` syntax
        # is invalid on Python 3)
        raise ValueError('Cannot flatten cyclic lists')
    result = []
    for item in l:
        if isinstance(item, list):
            result.extend(flatten(item, ancestors + [l]))
        else:
            result.append(item)
    return result
def color(c0, c1=None, c2=None, c3=None):
    """Convert a color of any of the following formats RGB, RGBA, L, LA
    from integral form in range [0,255] to floating point representation
    in range [0.0, 1.0]. Internally, miru always uses the latter, so
    if you prefer the former, this function is your friend.
    """
    def _scale(component):
        return component / 255.
    if c1 is None and c2 is None and c3 is None:
        return _scale(c0)
    if c2 is None and c3 is None:
        return (_scale(c0), _scale(c1))
    if c3 is None:
        return (_scale(c0), _scale(c1), _scale(c2))
    return (_scale(c0), _scale(c1), _scale(c2), _scale(c3))
def point_in_rect(x, y, rx, ry, width, height):
    """Return True if the 2D point (x, y) lies inside the axis-aligned
    rectangle with lower-left corner (rx, ry); edges count as inside.
    """
    inside_x = rx <= x <= rx + width
    inside_y = ry <= y <= ry + height
    return inside_x and inside_y
def change_cursor(style, window):
    """Switch *window*'s mouse cursor to the named system cursor style."""
    window.set_mouse_cursor(window.get_system_mouse_cursor(style))
def scale_texcoords(texcoords, scale=1, format=2):
    """Expand 2- or 3-component texture coordinates with a scale term.

    For ``format=2`` each (u, v) pair becomes (u, v, 0, scale); for any
    other format each group gains a trailing ``scale`` component. The
    result is always returned as a tuple.
    """
    # preserve exact tuple-vs-list slicing semantics of the input
    wrap = tuple if type(texcoords) is tuple else list
    # the padding appended to every group is loop-invariant
    pad = wrap((0, scale)) if format == 2 else wrap((scale,))
    coords = []
    for start in range(0, len(texcoords), format):
        coords.extend(texcoords[start:start + format] + pad)
    return tuple(coords)
def select_object(x, y, objects=None):
    """Return the object rendered under window coordinates (x, y), or
    None if nothing was hit.

    Uses legacy OpenGL selection-mode picking: the candidate objects are
    rendered into a small pick region around (x, y) and the resulting
    hit records are inspected.
    """
    from miru.context import context
    if objects is None:
        objects = context.camera.objects
    # following technique is adapted from
    # http://www.cse.msu.edu/~cse872/tutorial9.html
    w = context.window.width
    h = context.window.height
    # selection buffer with room for 100 GLuint hit-record entries
    select_buffer = ctypes.cast((100 * gl.GLuint)(), ctypes.POINTER(gl.GLuint))
    gl.glSelectBuffer(100, select_buffer)
    viewport = (4 * gl.GLint)()
    gl.glGetIntegerv(gl.GL_VIEWPORT, viewport)
    gl.glMatrixMode(gl.GL_PROJECTION)
    gl.glLoadIdentity()
    # rotate the camera first
    angle = context.camera.angle
    gl.glRotatef(angle.z, 0, 0, 1)
    gl.glRotatef(angle.y, 0, 1, 0)
    gl.glRotatef(angle.x, 1, 0, 0)
    # restrict rendering to a 3x3-pixel pick region around the cursor
    gl.gluPickMatrix(x, y, 3, 3, viewport)
    gl.glRenderMode(gl.GL_SELECT)
    gl.gluPerspective(45., w / float(h), 0.1, 1000.)
    gl.glMatrixMode(gl.GL_MODELVIEW)
    gl.glInitNames()
    gl.glPushName(-1)
    # selection pass: hits are recorded into select_buffer, not drawn
    context.camera.render(select_pass=1, visible=objects)
    gl.glFlush()
    hits = gl.glRenderMode(gl.GL_RENDER)
    gl.glPopName()
    selected = None
    if hits:
        try:
            # NOTE(review): sys.maxint is Python 2 only; the shifted value
            # just acts as a +infinity sentinel for the running minimum.
            m = sys.maxint << 100
            idx = 0
            # Hit records appear to be scanned in strides of 4, tracking
            # the entry with the smallest field [i+1] (presumably the
            # nearest hit) and taking its name from field [i+3] -- TODO
            # confirm the exact record layout.
            for i in range(0, 100, 4):
                if not select_buffer[i]:
                    selected = objects[idx]
                    break
                m = min(select_buffer[i+1], m)
                if m == select_buffer[i+1]:
                    idx = select_buffer[i+3]
        except IndexError:
            pass
    # restore the normal projection via the window's resize handler
    context.window.on_resize(context.window.width, context.window.height)
    return selected
|
{
"content_hash": "41b400d23ea54903851bb9dfaab0b599",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 85,
"avg_line_length": 28.69182389937107,
"alnum_prop": 0.6067514248136782,
"repo_name": "Knio/miru",
"id": "80356c7a40375b534b5dbf89d1a44b373e5b0dc9",
"size": "4672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "miru/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "378122"
},
{
"name": "Shell",
"bytes": "100"
}
],
"symlink_target": ""
}
|
"""NDArray configuration API."""
import ctypes
from ..base import _LIB
from ..base import c_str_array, c_handle_array
from ..base import NDArrayHandle, CachedOpHandle, SymbolHandle
from ..base import check_call
from .. import _global_var
def _monitor_callback_wrapper(callback):
"""A wrapper for the user-defined handle."""
def callback_handle(name, opr_name, array, _):
""" ctypes function """
callback(name, opr_name, array)
return callback_handle
class NDArrayBase(object):
    """Base data structure for ndarray"""
    # __slots__ keeps the many ndarray instances lightweight
    __slots__ = ["handle", "writable", "_alive"]
    # pylint: disable= no-member
    def __init__(self, handle, writable=True):
        """initialize a new NDArray
        Parameters
        ----------
        handle : NDArrayHandle
            NDArray handle of C API
        """
        if handle is not None:
            assert isinstance(handle, NDArrayHandle)
        self.handle = handle
        self.writable = writable
        # _alive marks whether the underlying C handle is still owned
        self._alive = True
    def __del__(self):
        # Free the backend NDArray when the Python wrapper is collected.
        check_call(_LIB.MXNDArrayFree(self.handle))
        self._alive = False
    def __reduce__(self):
        # Pickle support: rebuild via the registered ndarray class with a
        # null handle, restoring state through __getstate__/__setstate__
        # (presumably provided by subclasses -- not visible here).
        return (_global_var._ndarray_cls, (None,), self.__getstate__())
def _imperative_invoke(handle, ndargs, keys, vals, out, is_np_op, output_is_list):
    """ctypes implementation of imperative invoke wrapper"""
    # When the caller supplies `out`, results are written in place and the
    # caller's own object(s) are returned unchanged.
    if out is not None:
        original_output = out
        if isinstance(out, NDArrayBase):
            out = (out,)
        num_output = ctypes.c_int(len(out))
        output_vars = c_handle_array(out)
        output_vars = ctypes.cast(output_vars, ctypes.POINTER(NDArrayHandle))
    else:
        original_output = None
        # null pointer + zero count lets the backend allocate the outputs
        output_vars = ctypes.POINTER(NDArrayHandle)()
        num_output = ctypes.c_int(0)
    # return output stypes to avoid the c_api call for checking
    # a handle's stype in _ndarray_cls
    out_stypes = ctypes.POINTER(ctypes.c_int)()
    # num_output/output_vars are in-out: on return they describe the
    # backend-produced outputs.
    check_call(_LIB.MXImperativeInvoke(
        ctypes.c_void_p(handle),
        ctypes.c_int(len(ndargs)),
        c_handle_array(ndargs),
        ctypes.byref(num_output),
        ctypes.byref(output_vars),
        ctypes.c_int(len(keys)),
        c_str_array(keys),
        c_str_array([str(s) for s in vals]),
        ctypes.byref(out_stypes)))
    # numpy-style ops wrap results in the registered np ndarray class
    create_ndarray_fn = _global_var._np_ndarray_cls if is_np_op else _global_var._ndarray_cls
    if original_output is not None:
        return original_output
    # single output unwraps to a bare ndarray unless the op is declared
    # to always return a list
    if num_output.value == 1 and not output_is_list:
        return create_ndarray_fn(ctypes.cast(output_vars[0], NDArrayHandle),
                                 stype=out_stypes[0])
    else:
        return [create_ndarray_fn(ctypes.cast(output_vars[i], NDArrayHandle),
                                  stype=out_stypes[i]) for i in range(num_output.value)]
class CachedOp(object):
"""Cached operator handle."""
__slots__ = ["handle", "is_np_sym", "_monitor_callback"]
def __init__(self, sym, flags=(), thread_safe=False):
self.handle = CachedOpHandle()
self._monitor_callback = None
from ..symbol.numpy._symbol import _Symbol
self.is_np_sym = bool(isinstance(sym, _Symbol))
check_call(_LIB.MXCreateCachedOp(
sym.handle,
len(flags),
c_str_array([key for key, _ in flags]),
c_str_array([str(val) for _, val in flags]),
ctypes.byref(self.handle),
ctypes.c_bool(thread_safe)))
def __del__(self):
check_call(_LIB.MXFreeCachedOp(self.handle))
def get_optimized_symbol(self):
"""Get an optimized version of the symbol from the cached op.
Returns
-------
symbol : Symbol
Optimized symbol from the executor.
"""
from ..symbol import Symbol
sym_handle = SymbolHandle()
check_call(_LIB.MXCachedOpGetOptimizedSymbol(self.handle, ctypes.byref(sym_handle)))
ret = Symbol(sym_handle)
return ret
def __call__(self, *args, **kwargs):
"""ctypes implementation of imperative invoke wrapper"""
out = kwargs.pop('out', None)
default_ctx = kwargs.pop('default_ctx', None)
if out is not None:
original_output = out
if isinstance(out, NDArrayBase):
out = (out,)
num_output = ctypes.c_int(len(out))
output_vars = c_handle_array(out)
output_vars = ctypes.cast(output_vars, ctypes.POINTER(NDArrayHandle))
else:
original_output = None
output_vars = ctypes.POINTER(NDArrayHandle)()
num_output = ctypes.c_int(0)
if kwargs:
raise TypeError(
"CachedOp.__call__ got unexpected keyword argument(s): " + \
', '.join(kwargs.keys()))
# return output stypes to avoid the c_api call for checking
# a handle's stype in _ndarray_cls
out_stypes = ctypes.POINTER(ctypes.c_int)()
# (None, ) -> []
if len(args) == 1 and args[0] is None:
args = []
assert default_ctx is not None, 'default_ctx is required if no input is provided'
else:
default_ctx = args[0].ctx if default_ctx is None else default_ctx
check_call(_LIB.MXInvokeCachedOp(
self.handle,
ctypes.c_int(len(args)),
c_handle_array(args),
ctypes.c_int(default_ctx.device_typeid),
ctypes.c_int(default_ctx.device_id),
ctypes.byref(num_output),
ctypes.byref(output_vars),
ctypes.byref(out_stypes)))
if original_output is not None:
return original_output
create_ndarray_fn = _global_var._np_ndarray_cls if self.is_np_sym else _global_var._ndarray_cls
if num_output.value == 1:
return create_ndarray_fn(ctypes.cast(output_vars[0], NDArrayHandle),
stype=out_stypes[0])
else:
return [create_ndarray_fn(ctypes.cast(output_vars[i], NDArrayHandle),
stype=out_stypes[i]) for i in range(num_output.value)]
def _register_op_hook(self, callback, monitor_all=False):
    """Install callback for monitor.

    Parameters
    ----------
    callback : function
        Takes a string for node_name, string for op_name and a NDArrayHandle.
    monitor_all : bool, default False
        If true, monitor both inputs and outputs, otherwise monitor output only.
    """
    # C callback signature: (node_name, op_name, array_handle, user_data).
    cb_type = ctypes.CFUNCTYPE(None, ctypes.c_char_p, ctypes.c_char_p, NDArrayHandle, ctypes.c_void_p)
    if callback:
        # Keep the ctypes trampoline alive on self so it is not
        # garbage-collected while the backend still holds the pointer.
        self._monitor_callback = cb_type(_monitor_callback_wrapper(callback))
    # NOTE(review): if `callback` is falsy and no hook was registered
    # earlier, the attribute access below raises AttributeError —
    # confirm callers always pass a real callable.
    check_call(_LIB.MXCachedOpRegisterOpHook(
        self.handle,
        self._monitor_callback,
        ctypes.c_int(monitor_all)))
|
{
"content_hash": "65963e4f626e25ad1015c592dc85e2e5",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 106,
"avg_line_length": 36.5026455026455,
"alnum_prop": 0.5860269604290477,
"repo_name": "leezu/mxnet",
"id": "15bdbc4afdc49ce1b719e6dead6d0bdfc2a159e5",
"size": "7820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/mxnet/_ctypes/ndarray.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "233623"
},
{
"name": "C++",
"bytes": "9758652"
},
{
"name": "CMake",
"bytes": "164032"
},
{
"name": "Clojure",
"bytes": "622640"
},
{
"name": "Cuda",
"bytes": "1292731"
},
{
"name": "Dockerfile",
"bytes": "101147"
},
{
"name": "Groovy",
"bytes": "168211"
},
{
"name": "HTML",
"bytes": "40268"
},
{
"name": "Java",
"bytes": "205196"
},
{
"name": "Julia",
"bytes": "445413"
},
{
"name": "Jupyter Notebook",
"bytes": "3660357"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "149220"
},
{
"name": "Perl",
"bytes": "1558421"
},
{
"name": "PowerShell",
"bytes": "9244"
},
{
"name": "Python",
"bytes": "9866322"
},
{
"name": "R",
"bytes": "357982"
},
{
"name": "Raku",
"bytes": "9012"
},
{
"name": "SWIG",
"bytes": "161870"
},
{
"name": "Scala",
"bytes": "1304635"
},
{
"name": "Shell",
"bytes": "458535"
},
{
"name": "Smalltalk",
"bytes": "3497"
}
],
"symlink_target": ""
}
|
nullField = None


class NullField(object):
    """Singleton placeholder for a field that does not exist.

    The shared instance is exposed as the module-level ``nullField``.
    It is falsy, compares unequal to everything (including itself),
    returns itself for any attribute access, and silently ignores
    attribute assignment, so arbitrarily deep lookups on missing data
    degrade gracefully instead of raising.
    """

    def __new__(cls, *args, **kwargs):
        # Always hand back the one shared instance.
        global nullField
        if nullField is None:
            # Fixed: object.__new__ takes no extra arguments in Python 3;
            # forwarding *args/**kwargs here raised TypeError when any
            # arguments were supplied.
            nullField = object.__new__(cls)
        return nullField

    def __init__(self):
        assert self is nullField

    def __bool__(self):
        """
        NullField is always treated as False.
        """
        return False

    def __getattr__(self, item):
        """
        Any sub field of the NullField is NullField itself.
        """
        return self

    def __setattr__(self, key, value):
        # Writes to a missing field are deliberately discarded.
        pass

    def __len__(self) -> int:
        # Fixed: the return annotation is the *type* int; the literal
        # ``-> 0`` was not a meaningful annotation.
        return 0

    def __eq__(self, other):
        """
        NullField is always not equal to any other value.
        """
        return False

    def __ne__(self, other):
        return True

    def __lt__(self, other):
        """
        Comparing NullField to any other value gets False.
        """
        return False

    def __le__(self, other):
        """
        Comparing NullField to any other value gets False.
        """
        return False

    def __gt__(self, other):
        """
        Comparing NullField to any other value gets False.
        """
        return False

    def __ge__(self, other):
        """
        Comparing NullField to any other value gets False.
        """
        return False

    def __str__(self):
        return "nullField"

    def __repr__(self):
        return 'nullField'


# Instantiate once at import time so ``nullField`` is ready for use.
NullField()
if __name__ == '__main__':
    # Smoke-test the nullField singleton contract.
    assert nullField is NullField()
    assert bool(nullField) is False
    assert not nullField, repr(nullField)
    # Equality: nullField never equals anything, not even itself.
    assert nullField != nullField, repr(nullField)
    assert nullField != 0
    assert nullField != ""
    assert nullField != None  # noqa
    assert nullField is not None
    # Ordering: every comparison yields False.
    assert not (nullField > 1)
    assert not (nullField < 1)
    assert not (nullField < nullField)
    assert not (nullField > nullField)
|
{
"content_hash": "265d7075193118b28e7823258edf71cd",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 60,
"avg_line_length": 21.695652173913043,
"alnum_prop": 0.5425851703406813,
"repo_name": "srickardti/openthread",
"id": "19103300b441e2efe3619010fec5c8ef4e7ffeb1",
"size": "3601",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "tests/scripts/thread-cert/pktverify/null_field.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2610"
},
{
"name": "C",
"bytes": "1586867"
},
{
"name": "C++",
"bytes": "8331824"
},
{
"name": "CMake",
"bytes": "109816"
},
{
"name": "Dockerfile",
"bytes": "10410"
},
{
"name": "M4",
"bytes": "32369"
},
{
"name": "Makefile",
"bytes": "192208"
},
{
"name": "Python",
"bytes": "4622817"
},
{
"name": "Shell",
"bytes": "165383"
}
],
"symlink_target": ""
}
|
import pytz
from datetime import datetime
from factory import DjangoModelFactory
from factory.fuzzy import FuzzyDateTime, FuzzyChoice
from mii_interface.models import Report
class ReportFactory(DjangoModelFactory):
    """factory_boy factory producing Report instances for tests."""

    class Meta:
        # Model this factory instantiates.
        model = Report

    # Random timezone-aware datetime from 2000-01-01 UTC onward.
    date = FuzzyDateTime(datetime(2000, 1, 1, tzinfo=pytz.UTC))
    # One of the report categories used by mii_interface.
    report_type = FuzzyChoice(['sorter', 'indexer', 'unpacker', 'rss'])
    # Minimal HTML payload placeholder.
    report_html = '<p>Report<p>'
|
{
"content_hash": "b3704284cb2ef742bee6407b2aedf359",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 71,
"avg_line_length": 25.235294117647058,
"alnum_prop": 0.7319347319347319,
"repo_name": "MiiRaGe/miilibrary",
"id": "39f35f977957c6b6ce053efb5fa08252d02f048b",
"size": "429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mii_interface/factories.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "222"
},
{
"name": "HTML",
"bytes": "14946"
},
{
"name": "Python",
"bytes": "226463"
},
{
"name": "Shell",
"bytes": "1581"
}
],
"symlink_target": ""
}
|
{{ license }}
# Author: {{ author }}
class CommonError(Exception):
    """Base application exception (Python 2 style template).

    Subclasses override ``message`` with a %-format template whose
    named placeholders are filled from the keyword arguments passed
    to the constructor.
    """

    # Default template; subclasses are expected to override it.
    message = "An unknown exception occurred."
    # Numeric error code; -1 means "unspecified".
    errcode = -1

    def __init__(self, **kwargs):
        # Interpolate the template with the provided keyword arguments.
        super(CommonError, self).__init__(self.message % kwargs)
        self.msg = self.message % kwargs

    def __unicode__(self):
        # Python 2 unicode protocol (`unicode` builtin is Py2-only).
        return unicode(self.msg)

    def use_fatal_exceptions(self):
        # Hook for subclasses that want fatal (non-recoverable) semantics.
        return False
|
{
"content_hash": "4a959bd56298aa2e9d13ef04b94a2e05",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 64,
"avg_line_length": 23.11764705882353,
"alnum_prop": 0.6030534351145038,
"repo_name": "jianingy/codeskel",
"id": "152de9d1da57777163fcf2c6d17f561643c52ace",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skels/flask/{{ project_name }}/common/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14838"
}
],
"symlink_target": ""
}
|
import sys
from search import views as search_views
from dashboard import views as dashboard_views
from dashboard import api as dashboard_api
if sys.version_info[0] > 2:
from django.urls import re_path
else:
from django.conf.urls import url as re_path
# URL patterns for the search app. Order matters: Django resolves the
# first matching pattern.
urlpatterns = [
    re_path(r'^install_examples$', search_views.install_examples, name='install_examples'),
]

# Those are all deprecated and dashboard.urls.py is the new reference.
urlpatterns += [
    re_path(r'^$', dashboard_views.index, name='index'),
    re_path(r'^m$', dashboard_views.index_m, name='index_m'),
    re_path(r'^save$', dashboard_views.save, name='save'),
    re_path(r'^new_search', dashboard_views.new_search, name='new_search'),
    re_path(r'^browse/(?P<name>.+)', dashboard_views.browse, name='browse'),
    re_path(r'^browse_m/(?P<name>.+)', dashboard_views.browse_m, name='browse_m'),

    # Admin
    re_path(r'^admin/collections$', dashboard_views.admin_collections, name='admin_collections'),
    re_path(r'^admin/collection_delete$', dashboard_views.admin_collection_delete, name='admin_collection_delete'),
    re_path(r'^admin/collection_copy$', dashboard_views.admin_collection_copy, name='admin_collection_copy'),
]

# JSON/API endpoints backed by dashboard.api.
urlpatterns += [
    re_path(r'^search$', dashboard_api.search, name='search'),
    re_path(r'^suggest/?$', dashboard_api.query_suggest, name='query_suggest'),
    re_path(r'^index/fields/dynamic$', dashboard_api.index_fields_dynamic, name='index_fields_dynamic'),
    re_path(r'^index/fields/nested_documents', dashboard_api.nested_documents, name='nested_documents'),
    re_path(r'^template/new_facet$', dashboard_api.new_facet, name='new_facet'),
    re_path(r'^get_document$', dashboard_api.get_document, name='get_document'),
    re_path(r'^update_document$', dashboard_api.update_document, name='update_document'),
    re_path(r'^get_range_facet$', dashboard_api.get_range_facet, name='get_range_facet'),
    re_path(r'^download$', dashboard_api.download, name='download'),
    re_path(r'^get_timeline$', dashboard_api.get_timeline, name='get_timeline'),
    re_path(r'^get_collection$', dashboard_api.get_collection, name='get_collection'),
    re_path(r'^get_collections$', dashboard_api.get_collections, name='get_collections'),
    re_path(r'^get_stats$', dashboard_api.get_stats, name='get_stats'),
    re_path(r'^get_terms$', dashboard_api.get_terms, name='get_terms'),
]
|
{
"content_hash": "47f956ba9ac6c217dbd2aad040d3ff16",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 113,
"avg_line_length": 47.95918367346939,
"alnum_prop": 0.7148936170212766,
"repo_name": "cloudera/hue",
"id": "fb61c3ea5bc84670cc280019cafbb4812e9292fe",
"size": "3142",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "apps/search/src/search/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "2347"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "Batchfile",
"bytes": "143575"
},
{
"name": "C",
"bytes": "5129166"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "718011"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "680715"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "Closure Templates",
"bytes": "1072"
},
{
"name": "CoffeeScript",
"bytes": "403"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cython",
"bytes": "1016963"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Dockerfile",
"bytes": "13576"
},
{
"name": "EJS",
"bytes": "752"
},
{
"name": "Eiffel",
"bytes": "375"
},
{
"name": "Elixir",
"bytes": "692"
},
{
"name": "Elm",
"bytes": "487"
},
{
"name": "Emacs Lisp",
"bytes": "411907"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "FreeMarker",
"bytes": "1017"
},
{
"name": "G-code",
"bytes": "521"
},
{
"name": "GAP",
"bytes": "29873"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Gherkin",
"bytes": "699"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "28328425"
},
{
"name": "Haml",
"bytes": "920"
},
{
"name": "Handlebars",
"bytes": "173"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "HiveQL",
"bytes": "43"
},
{
"name": "Io",
"bytes": "140"
},
{
"name": "Java",
"bytes": "457398"
},
{
"name": "JavaScript",
"bytes": "39181239"
},
{
"name": "Jinja",
"bytes": "356"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Lean",
"bytes": "213"
},
{
"name": "Less",
"bytes": "396102"
},
{
"name": "Lex",
"bytes": "218764"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "78382"
},
{
"name": "M4",
"bytes": "1751"
},
{
"name": "MATLAB",
"bytes": "203"
},
{
"name": "Makefile",
"bytes": "1025937"
},
{
"name": "Mako",
"bytes": "3644004"
},
{
"name": "Mask",
"bytes": "597"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "333"
},
{
"name": "PHP",
"bytes": "662"
},
{
"name": "PLSQL",
"bytes": "29403"
},
{
"name": "PLpgSQL",
"bytes": "6006"
},
{
"name": "Pascal",
"bytes": "84273"
},
{
"name": "Perl",
"bytes": "4327"
},
{
"name": "PigLatin",
"bytes": "371"
},
{
"name": "PowerShell",
"bytes": "6235"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Pug",
"bytes": "584"
},
{
"name": "Python",
"bytes": "92881549"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Roff",
"bytes": "484108"
},
{
"name": "Ruby",
"bytes": "1098"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "SCSS",
"bytes": "78508"
},
{
"name": "Sass",
"bytes": "770"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "249165"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "SourcePawn",
"bytes": "948"
},
{
"name": "Stylus",
"bytes": "682"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "165743"
},
{
"name": "Thrift",
"bytes": "341963"
},
{
"name": "Twig",
"bytes": "761"
},
{
"name": "TypeScript",
"bytes": "1241396"
},
{
"name": "VBScript",
"bytes": "938"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Vim Snippet",
"bytes": "226931"
},
{
"name": "Vue",
"bytes": "350385"
},
{
"name": "XQuery",
"bytes": "114"
},
{
"name": "XSLT",
"bytes": "522199"
},
{
"name": "Yacc",
"bytes": "1070437"
},
{
"name": "jq",
"bytes": "4"
}
],
"symlink_target": ""
}
|
"""
.. module:: upload.constants
"""
CONTENT_RANGE_HEADER = 'Content-Range'
MAX_CHUNKSIZE = 512000 # 512 kb
ALLOWED_MIMETYPES = [
'video/x-msvideo',
'image/x-ms-bmp',
'application/msword',
'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'application/vnd.openxmlformats-officedocument.presentationml.slideshow',
'video/x-flv',
'image/gif',
'image/jpeg',
'image/bmp',
'video/quicktime',
'audio/mpeg',
'video/mp4',
'video/avi',
'application/vnd.oasis.opendocument.formula',
'application/vnd.oasis.opendocument.graphics',
'application/vnd.oasis.opendocument.presentation',
'application/vnd.oasis.opendocument.spreadsheet',
'application/vnd.oasis.opendocument.text',
'application/pdf',
'image/png',
'application/vnd.ms-word',
'application/vnd.ms-excel',
'application/vnd.ms-office',
'application/vnd.ms-powerpoint',
'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'text/plain',
'text/rtf',
'image/tiff',
'image/tif',
'audio/x-wav',
'audio/wav',
'audio/mp3',
'video/x-ms-asf',
'video/x-ms-wma',
'video/x-ms-wmv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
]
|
{
"content_hash": "337688f1b2600eac5e16ac302ac9d3a6",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 80,
"avg_line_length": 27.29787234042553,
"alnum_prop": 0.6718628215120811,
"repo_name": "CityOfNewYork/NYCOpenRecords",
"id": "962de66e7606f90eae776313c810d306f3bf426c",
"size": "1283",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "app/upload/constants/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "40622"
},
{
"name": "HCL",
"bytes": "270"
},
{
"name": "HTML",
"bytes": "560649"
},
{
"name": "JavaScript",
"bytes": "219182"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Procfile",
"bytes": "26"
},
{
"name": "Python",
"bytes": "998502"
},
{
"name": "Ruby",
"bytes": "4498"
},
{
"name": "Shell",
"bytes": "52597"
},
{
"name": "TeX",
"bytes": "2379"
}
],
"symlink_target": ""
}
|
__all__ = ['subsystem_apply']
from itertools import product
from numpy import transpose, sqrt, arange, array, isreal, zeros, shape
from numpy import hstack, vsplit, hsplit, reshape
from scipy.linalg import eig
from qutip.qobj import Qobj, issuper, isket, isoper
from qutip.states import ket2dm
from qutip.operators import qeye
from qutip.superoperator import vec2mat
from qutip.superop_reps import super_to_choi
from qutip.tensor import tensor
def subsystem_apply(state, channel, mask, reference=False):
    """
    Returns the result of applying the propagator `channel` to the
    subsystems indicated in `mask`, which comprise the density operator
    `state`.

    Parameters
    ----------
    state : :class:`qutip.qobj`
        A density matrix or ket.

    channel : :class:`qutip.qobj`
        A propagator, either an `oper` or `super`.

    mask : *list* / *array*
        A mask that selects which subsystems should be subjected to the
        channel.

    reference : bool
        Decides whether explicit Kraus map should be used to evaluate action
        of channel.

    Returns
    -------
    rho_out: :class:`qutip.qobj`
        A density matrix with the selected subsystems transformed
        according to the specified channel.
    """
    # TODO: Include sparse/dense methods a la partial_transpose.

    # ---Sanity Checks---#
    # NOTE(review): validation uses `assert`, which is stripped under
    # ``python -O`` — these are input checks, not internal invariants.
    # state must be a ket or density matrix, channel must be a propagator.
    assert state.type == 'ket' or state.type == 'oper', "Input state must be\
        a ket or oper, given: " + repr(state.type)
    assert channel.type == 'super' or channel.type == 'oper', "Input channel \
        must be a super or oper, given: " + repr(channel.type)

    # Since there's only one channel, all affected subsystems must have
    # the same dimensions:
    aff_subs_dim_ar = transpose(array(state.dims))[array(mask)]
    assert all([(aff_subs_dim_ar[j] == aff_subs_dim_ar[0]).all()
                for j in range(len(aff_subs_dim_ar))]), \
        "Affected subsystems must have the same dimension. Given:" +\
        repr(aff_subs_dim_ar)

    # If the map is on the Hilbert space, it must have the same dimension
    # as the affected subsystem. If it is on the Liouville space, it must
    # exist on a space as large as the square of the Hilbert dimension.
    if issuper(channel):
        required_shape = tuple(map(lambda x: x ** 2, aff_subs_dim_ar[0]))
    else:
        required_shape = aff_subs_dim_ar[0]
    assert array([channel.shape == required_shape]).all(), \
        "Superoperator dimension must be the " + \
        "subsystem dimension squared, given: " \
        + repr(channel.shape)

    # Ensure mask is an array:
    mask = array(mask)

    if reference:
        # Slow, explicit Kraus/unitary construction used as a reference.
        return _subsystem_apply_reference(state, channel, mask)

    # Fast block-based path; kets are promoted to density matrices.
    if state.type == 'oper':
        return _subsystem_apply_dm(state, channel, mask)
    else:
        return _subsystem_apply_ket(state, channel, mask)
def _subsystem_apply_ket(state, channel, mask):
    # TODO Write more efficient code for single-matrix map on pure states
    # TODO Write more efficient code for single-subsystem map . . .
    # Promote the ket to a density matrix and reuse the general routine.
    density_matrix = ket2dm(state)
    return _subsystem_apply_dm(density_matrix, channel, mask)
def _subsystem_apply_dm(state, channel, mask):
    """
    Applies a channel to every subsystem indicated by a mask, by
    repeatedly applying the channel to each affected subsystem.
    """
    out = state
    # Indices of the subsystems selected by the boolean mask.
    affected_indices = arange(len(state.dims[0]))[mask]
    for subsystem_idx in affected_indices:
        out = _one_subsystem_apply(out, channel, subsystem_idx)
    return out
def _one_subsystem_apply(state, channel, idx):
    """
    Applies a channel to a state on one subsystem, by breaking it into
    blocks and applying a reduced channel to each block.
    """
    # Per-subsystem dimension pairs (transposed qutip dims structure).
    subs_dim_ar = array(state.dims).T
    # Calculate number of blocks: product of the dimensions of all
    # subsystems to the *left* of `idx` in the tensor product.
    n_blks = 1
    for mul_idx in range(idx):
        # print mul_idx
        # print subs_dim_ar[mul_idx]
        n_blks = n_blks * subs_dim_ar[mul_idx][0]
    blk_sz = state.shape[0] // n_blks
    # Apply channel to top subsystem of each block in matrix
    full_data_matrix = state.data.todense()
    # Promote to complex so in-place assignment of complex results works.
    if isreal(full_data_matrix).all():
        full_data_matrix = full_data_matrix.astype(complex)
    for blk_r in range(n_blks):
        for blk_c in range(n_blks):
            # Apply channel to high-level blocks of matrix:
            blk_rx = blk_r * blk_sz
            blk_cx = blk_c * blk_sz
            full_data_matrix[blk_rx:blk_rx + blk_sz, blk_cx:blk_cx + blk_sz] =\
                _block_apply(
                    full_data_matrix[
                        blk_rx:blk_rx + blk_sz, blk_cx:blk_cx + blk_sz],
                    channel)
    return Qobj(dims=state.dims, inpt=full_data_matrix)
def _block_apply(block, channel):
    """Dispatch on the channel representation: unitary vs superoperator."""
    if isoper(channel):
        return _top_apply_U(block, channel)
    if issuper(channel):
        return _top_apply_S(block, channel)
    # Neither representation: leave the block untouched (original behavior).
    return block
def _top_apply_U(block, channel):
    """
    Uses scalar-matrix multiplication to efficiently apply a channel to
    the leftmost register in the tensor product, given a unitary matrix
    for a channel.
    """
    if isreal(block).all():
        # Promote to complex so the accumulation below can hold complex values.
        block = block.astype(complex)
    # Decompose the block into a grid of sub-blocks sized by the channel.
    split_mat = _block_split(block, *channel.shape)
    temp_split_mat = zeros(shape(split_mat)).astype(complex)
    # Block-wise conjugation by the unitary: each output sub-block is a
    # weighted sum of input sub-blocks with U and conj(U) entries.
    for dm_row_idx in range(channel.shape[0]):
        for dm_col_idx in range(channel.shape[1]):
            for op_row_idx in range(channel.shape[0]):
                for op_col_idx in range(channel.shape[1]):
                    temp_split_mat[dm_row_idx][dm_col_idx] =\
                        temp_split_mat[dm_row_idx][dm_col_idx] +\
                        channel[dm_row_idx, op_col_idx] *\
                        channel[dm_col_idx, op_row_idx].conjugate() *\
                        split_mat[op_col_idx][op_row_idx]
    return _block_join(temp_split_mat)
def _top_apply_S(block, channel):
    """Apply a superoperator channel to the top subsystem of `block`."""
    # If the channel is a super-operator,
    # perform second block decomposition; block-size
    # matches Hilbert space of affected subsystem:
    # FIXME use state shape?
    n_v = int(sqrt(channel.shape[0]))
    n_h = int(sqrt(channel.shape[1]))
    # Column-major stack of sub-blocks (vectorized view of the block).
    column = _block_col(block, n_v, n_h)
    chan_mat = channel.data.todense()
    temp_col = zeros(shape(column)).astype(complex)
    # print chan_mat.shape
    # Matrix-vector product of the superoperator with the block column:
    # each output sub-block is a linear combination of input sub-blocks.
    for row_idx in range(len(chan_mat)):
        row = chan_mat[row_idx]
        # print [scal[0,0]*mat for (scal,mat) in zip(transpose(row),column)]
        temp_col[row_idx] = sum([s[0, 0] * mat
                                 for (s, mat) in zip(transpose(row), column)])
    return _block_stack(temp_col, n_v, n_h)
def _block_split(mat_in, n_v, n_h):
"""
Returns a 4D array of matrices, splitting mat_in into
n_v * n_h square sub-arrays.
"""
return list(map(lambda x: hsplit(x, n_h), vsplit(mat_in, n_v)))
def _block_join(mat_in):
return hstack(hstack(mat_in))
def _block_col(mat_in, n_v, n_h):
    """
    Returns a 3D array of matrices, splitting mat_in into
    n_v * n_h square sub-arrays.
    """
    rows, cols = shape(mat_in)
    # Split into an n_v x n_h grid of sub-blocks (split inlined here).
    grid = array([hsplit(band, n_h) for band in vsplit(mat_in, n_v)])
    # Swap the block axes so blocks are enumerated column-by-column,
    # then flatten the grid into a single stack of blocks.
    column_major = grid.transpose(1, 0, 2, 3)
    return reshape(column_major, (n_v * n_h, rows // n_v, cols // n_h))
def _block_stack(arr_in, n_v, n_h):
"""
Inverse of _block_split
"""
rs, cs = shape(arr_in)[-2:]
temp = list(map(transpose, arr_in))
# print shape(arr_in)
temp = reshape(temp, (n_v, n_h, rs, cs))
return hstack(hstack(temp)).T
def _subsystem_apply_reference(state, channel, mask):
    """Slow reference implementation used to validate the block-based path."""
    if isket(state):
        state = ket2dm(state)
    if isoper(channel):
        # Unitary channel: pad unaffected subsystems with identities and
        # conjugate the full state directly.
        full_oper = tensor([channel if mask[j]
                            else qeye(state.dims[0][j])
                            for j in range(len(state.dims[0]))])
        return full_oper * state * full_oper.dag()
    else:
        # Go to Choi, then Kraus
        # chan_mat = array(channel.data.todense())
        choi_matrix = super_to_choi(channel)
        vals, vecs = eig(choi_matrix.full())
        vecs = list(map(array, zip(*vecs)))
        # One Kraus operator per Choi eigenpair.
        kraus_list = [sqrt(vals[j]) * vec2mat(vecs[j])
                      for j in range(len(vals))]
        # Kraus operators to be padded with identities:
        k_qubit_kraus_list = product(kraus_list, repeat=sum(mask))
        rho_out = Qobj(inpt=zeros(state.shape), dims=state.dims)
        for operator_iter in k_qubit_kraus_list:
            operator_iter = iter(operator_iter)
            # Place the next Kraus operator on each masked subsystem,
            # identity elsewhere.
            op_iter_list = [next(operator_iter) if mask[j]
                            else qeye(state.dims[0][j])
                            for j in range(len(state.dims[0]))]
            full_oper = tensor(list(map(Qobj, op_iter_list)))
            # Accumulate sum_k K_k rho K_k^dagger.
            rho_out = rho_out + full_oper * state * full_oper.dag()
        return Qobj(rho_out)
|
{
"content_hash": "d37ef2abc850d30155ad1bfbc0719d95",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 79,
"avg_line_length": 34.261538461538464,
"alnum_prop": 0.609452177817692,
"repo_name": "anubhavvardhan/qutip",
"id": "4b3ff19a4d7a63d5bb96068d08f195f6ef7e9486",
"size": "10775",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "qutip/subsystem_apply.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2339"
},
{
"name": "Fortran",
"bytes": "259707"
},
{
"name": "Makefile",
"bytes": "3079"
},
{
"name": "Python",
"bytes": "2164445"
}
],
"symlink_target": ""
}
|
# NOTE: Python 2 script (uses print statements).
import pyopencl as cl
import numpy as np
import time  # For measuring the running times

VECTOR_SIZE = 50000  # Elements of vector

# Create two random vectors a & b
a_host = np.random.rand(VECTOR_SIZE).astype(np.float32)
b_host = np.random.rand(VECTOR_SIZE).astype(np.float32)
# Create an empty vector for the result
res_host= np.zeros(VECTOR_SIZE).astype(np.float32)

# Create CL context
platform = cl.get_platforms()[0]
device = platform.get_devices()[0] #get first gpu available
print "Running: ", platform
print "In GPU: ", device
ctx = cl.Context([device])
queue = cl.CommandQueue(ctx)

# Transfer host (CPU) memory to device (GPU) memory
mf = cl.mem_flags
a_gpu = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_host)
b_gpu = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b_host)

# Kernel code
# NOTE(review): the kernel is *named* `sum` but performs element-wise
# multiplication (res = a * b); the rest of the script treats it as
# multiplication, so only the name is misleading.
prg = cl.Program(ctx, """
__kernel void sum(__global const float *a_gpu, __global const float *b_gpu, __global float *res_gpu) {
  int gid = get_global_id(0);
  res_gpu[gid] = a_gpu[gid] * b_gpu[gid];
}
""").build()

# Create empty gpu array for the result
res_gpu = cl.Buffer(ctx, mf.WRITE_ONLY, a_host.nbytes)

tic=time.time()
# Operation using the GPU - call the kernel on the card
# NOTE(review): the enqueue is asynchronous and no queue.finish() is
# called before reading the clock, so time_gpu may measure only the
# launch, not the computation — confirm if timings matter.
prg.sum(queue, a_host.shape, None, a_gpu, b_gpu, res_gpu)
time_gpu=time.time()-tic

# Copy the result back from device (GPU) to host (CPU) memory
res_host = np.empty_like(a_host)
cl.enqueue_copy(queue, res_host, res_gpu)

# Check on CPU with Numpy:
print(res_host - (a_host * b_host))
print(np.linalg.norm(res_host - (a_host * b_host)))
#if 0 = good

tic=time.time()
#Operation using the cpu
for i in range(0,VECTOR_SIZE):
    res_host[i]=a_host[i]*b_host[i]
time_cpu=time.time()-tic

# Print the results
print "-" * 80
print "Vector Multiplication"
print "Vector Size:", VECTOR_SIZE
print "Time CPU:", time_cpu
print "Time GPU:", time_gpu
|
{
"content_hash": "5a56ece5dda568dc21962c35ad7091ba",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 102,
"avg_line_length": 25.208333333333332,
"alnum_prop": 0.6942148760330579,
"repo_name": "javierip/parallel-processing-teaching-toolkit",
"id": "5144c763b7cb3c07dce78a7a1f49f3b87575335f",
"size": "1995",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "04-GPU-accelerators/04-PyOpenCL/03-vector_multi/vector_multi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "26237"
},
{
"name": "C++",
"bytes": "372236"
},
{
"name": "CMake",
"bytes": "50866"
},
{
"name": "CSS",
"bytes": "9056"
},
{
"name": "Cuda",
"bytes": "52396"
},
{
"name": "HTML",
"bytes": "3717"
},
{
"name": "JavaScript",
"bytes": "536"
},
{
"name": "Makefile",
"bytes": "86"
},
{
"name": "Python",
"bytes": "83274"
},
{
"name": "QMake",
"bytes": "2523"
},
{
"name": "Scala",
"bytes": "616"
},
{
"name": "Shell",
"bytes": "4876"
}
],
"symlink_target": ""
}
|
import pyd.support
from pyd.support import setup, Extension, pydexe_sanity_check

# Abort early if the pyd build environment is not usable.
pydexe_sanity_check()

projName = 'class_wrap'

# Build the class_wrap extension from D source, with D unittests,
# the deimos bindings, and lumped compilation enabled.
ext_modules = setup(
    name=projName,
    version='1.0',
    ext_modules=[
        Extension("class_wrap", ["class_wrap.d"],
                  d_unittest=True,
                  build_deimos=True,
                  d_lump=True,
                  )
    ],
)
|
{
"content_hash": "f594e3b7082666eca443f327e72c877c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 61,
"avg_line_length": 23.4375,
"alnum_prop": 0.5653333333333334,
"repo_name": "John-Colvin/pyd",
"id": "41daefb7380142574bfa5e43abcea60fd390ab29",
"size": "375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pyd_unittests/class_wrap/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "D",
"bytes": "509104"
},
{
"name": "Makefile",
"bytes": "1277"
},
{
"name": "Python",
"bytes": "9337"
}
],
"symlink_target": ""
}
|
""" Basic MORSE simulation scene using the under_water environment. Used to
evaluate the correct handling of the HVL. It represents a simplified UOX
fuel element surrounded by water.
Run with a command line argument "debug" to connect to the PyCharm remote
debugger (you have to alter the path to the library to match your setup).
"""
from morse.builder import *
from morse.builder.robots.morserobots import ATRV
import math
from nuclear_radiation_sensor.builder.sensors.nuclear_radiation import \
NuclearRadiation
from nuclear_radiation_sensor.tools.debughelper import RemoteDebugHelper
import sys
if sys.argv[-1] == "debug":
RemoteDebugHelper(12345, "/opt/pycharm-4.0.4/pycharm-debug.egg").connect()
# add the robot (iRobot ATRV)
robot = ATRV()
robot.set_mass(50.0)
robot.location = (0.0, 0.0, 0.0)
robot.rotate(z=math.pi)
# add a motion controller
motion = MotionVW()
motion.add_stream("ros")
robot.append(motion)
robot.properties(GroundRobot=True)
# Add a keyboard controller to move the robot with arrow keys.
keyboard = Keyboard()
robot.append(keyboard)
keyboard.properties(ControlType="Position")
# add a pose sensor that exports the current location and orientation
# of the robot in the world frame
pose = Pose()
pose.add_stream("ros")
robot.append(pose)
# add the radiation sensor
radiation = NuclearRadiation()
radiation.properties(surrounding_material_name="Water")
radiation.add_stream("ros", "nuclear_radiation_sensor.middleware.ros.radiation.RadiationPublisher")
radiation.translate(x=0.6, z=0.07)
robot.append(radiation)
# set 'fastmode' to True to switch to wireframe mode
env = Environment('data/environments/under_water.blend', fastmode=False)
env.set_camera_location([10.0, -10.0, 10.0])
env.set_camera_rotation([1.05, 0, 0.78])
|
{
"content_hash": "d6eecba96c6893cf4c75fb25b3c3a367",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 99,
"avg_line_length": 32.72222222222222,
"alnum_prop": 0.7702320316921336,
"repo_name": "mklostermann/morse-radiation_sensor",
"id": "c4ee4aabe7967691e3c14361b38d98ae32248a01",
"size": "1794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "under_water.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "43794"
}
],
"symlink_target": ""
}
|
import sys
# Force the local copy of csbuild to be used rather than the installed copy.
sys.path.insert(0, "../../../")
import csbuild
csbuild.Toolchain( "gcc", "ios" ).Compiler().SetCppStandard( "c++11" )
csbuild.Toolchain( "gcc", "ios" ).SetCppStandardLibrary( "libc++" )
csbuild.DisablePrecompile()
csbuild.DisableChunkedBuild()
OUT_DIR = "out/{project.activeToolchainName}-{project.outputArchitecture}/{project.targetName}"
INT_DIR = "obj/{project.activeToolchainName}-{project.outputArchitecture}/{project.targetName}/{project.name}"
csbuild.SetOutputDirectory( OUT_DIR )
csbuild.SetIntermediateDirectory( INT_DIR )
csbuild.AddIncludeDirectories( "src" )
csbuild.AddLibraryDirectories( OUT_DIR )
@csbuild.project( "sharedLibrary", "src/sharedLibrary" )
def sharedLibrary():
csbuild.SetOutput( "sharedLibrary", csbuild.ProjectType.SharedLibrary )
@csbuild.project( "staticLibrary", "src/staticLibrary" )
def staticLibrary():
csbuild.SetOutput( "staticLibrary", csbuild.ProjectType.StaticLibrary )
@csbuild.project( "loadableModule", "src/loadableModule" )
def loadableModule():
csbuild.SetOutput( "loadableModule", csbuild.ProjectType.LoadableModule )
@csbuild.project( "mainApp", "src/mainApp", ["sharedLibrary", "staticLibrary", "loadableModule"] )
def mainApp():
csbuild.SetOutput( "mainApp", csbuild.ProjectType.Application )
|
{
"content_hash": "43739665318e1b42b30ea8ab89616391",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 110,
"avg_line_length": 33,
"alnum_prop": 0.7634885439763488,
"repo_name": "ShadauxCat/csbuild",
"id": "e4b2088d3d6f00a0126fe3b31b617222fd186cf9",
"size": "1372",
"binary": false,
"copies": "2",
"ref": "refs/heads/Integration",
"path": "UnitTests/OSX/HelloLibraries/make.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "155"
},
{
"name": "C++",
"bytes": "5646"
},
{
"name": "Python",
"bytes": "669149"
},
{
"name": "Shell",
"bytes": "172"
}
],
"symlink_target": ""
}
|
import logging
import datetime
import json
import re
from google.appengine.api import urlfetch
from google.appengine.api import memcache
from api.v2.parking.parkingdata import ParkingData
from api.BeautifulSoup import BeautifulSoup
from api.v2 import api_utils
# Handles fetch, parse and combine of cityparking data
class CityParkingService():
def __init__(self):
    # City-parking configuration; get_data reads the 'availability_url',
    # 'special_events_url' and 'lots' keys from this dict.
    self.parking_data = ParkingData().city_data
# orchestrate the heavy lifting
def get_data(self, include_special_events=None):
    """Fetch and parse city parking availability; optionally merge events.

    Parameters:
        include_special_events: when truthy, also scrape special-event
            data (served from memcache when available) and merge it in.
    Returns:
        The list of lot dicts (self.parking_data['lots']).
    """
    city_avail_url = self.parking_data['availability_url']
    parking_availability_html = self.fetch_availability_html(city_avail_url)
    parking_availabilities = self.parse_availability_html(parking_availability_html)
    if include_special_events:
        # Special events change rarely; try the cache before scraping.
        special_events = memcache.get('city_special_events')
        if special_events is None:
            special_events_url = self.parking_data['special_events_url']
            special_events_html = self.fetch_special_events_html(special_events_url)
            special_events = None
            if special_events_html:
                special_events = self.parse_special_events_html(special_events_html)
            memcache.set('city_special_events', special_events, 86400)  # cache for a day
        self.fill_cityparking_data_obj(parking_availabilities, special_events)
        # Remove location array as it is duplicative in response payload
        self.remove_locations_from_special_events()
    else:
        self.fill_cityparking_data_obj(parking_availabilities)
    return self.parking_data['lots']
## end get_data
def fetch_availability_html(self, url):
try:
result = urlfetch.fetch(url)
except urlfetch.DownloadError:
logging.error('Error fetching %s' % url)
raise urlfetch.DownloadError # die hard
return result.content
def parse_availability_html(self, availability_html):
results = []
lot_spots = None
try:
city_lot_soup = BeautifulSoup(availability_html)
# get all children of the availability div whose class name starts with dataRow
lot_rows = city_lot_soup.find('div', {'id': 'availability'})\
.findAll('div', {'class': re.compile('^dataRow')})
if not lot_rows: # if we find no rows, we're dead
raise ValueError
for row in lot_rows:
for detail in row:
if detail.string is not None and detail.string.isdigit():
lot_spots = detail.string
lot_details = {
'name': row.div.a.string,
'openSpots': int(lot_spots)
}
results.append(lot_details)
logging.debug(json.dumps(results))
except ValueError:
# Cannot parse html perhaps due to html change.
logging.error('ValueError parsing scraped content from city parking page.')
raise ValueError
except AttributeError:
# HTML doesn't include expected elements
logging.error('AttributeError parsing scraped content from city parking page.')
raise AttributeError
except TypeError:
# Html is probably None
logging.error('TypeError parsing scraped content from city parking page.')
raise TypeError
return results
## end parse_cityparking_availability_html
def fetch_special_events_html(self, special_events_url):
try:
#grab the city parking html page - what an awesome API!!! :(
result = urlfetch.fetch(special_events_url).content
except urlfetch.DownloadError:
# problem fetching url
logging.error('Error loading page (%s).' % special_events_url)
result = None
return result
## end fetch_parking_special_events_html
def parse_special_event_datetimes(self, table_cells):
event_timestamp = None
str_event_date = table_cells[0].string
# get datetime obj from special event time strings
str_event_time = api_utils.get_time_from_text(table_cells[5].string)
if str_event_time is not '':
event_timestamp = datetime.datetime.strptime(
str_event_date + str_event_time,'%m/%d/%Y%I:%M%p').strftime('%Y-%m-%dT%H:%M:%S')
# split '00:00 pm - 00:00 pm' into start and end strings
time_parts = table_cells[2].string.split(' - ')
# clean up whitespace to avoid errors due to inconsistent format
time_parts[0] = time_parts[0].replace(' ', '')
time_parts[1] = time_parts[1].replace(' ', '')
# transform provided parking_start_time (Central Time)
parking_start_time = datetime.datetime.strptime(
table_cells[0].string + time_parts[0],
'%m/%d/%Y%I:%M%p'
).strftime('%Y-%m-%dT%H:%M:%S')
# transform provided parking_end_time (Central Time)
parking_end_time = datetime.datetime.strptime(
table_cells[0].string + time_parts[1],
'%m/%d/%Y%I:%M%p'
).strftime('%Y-%m-%dT%H:%M:%S')
return event_timestamp, parking_end_time, parking_start_time
## end parse_special_event_datetimes
def parse_special_events_html(self, special_events_html):
special_events = dict()
special_events['specialEvents'] = []
if not special_events_html:
return special_events
try:
soup = BeautifulSoup(special_events_html)
# special_event_rows is array of <tr>'s.
special_event_rows = soup.find('table', {'id': 'calendar'}).findAll('tr')
# loop table rows, starting with 3rd row (excludes 2 header rows)
for row_index in range(2, len(special_event_rows)):
# table_cells is array in the current row
table_cells = special_event_rows[row_index].findAll('td')
parking_location = table_cells[1].string
event_venue = table_cells[4].string
event = table_cells[3].string
event_time, parking_end_time, parking_start_time = self.parse_special_event_datetimes(table_cells)
# add this special event info to the specialEvents collection
special_events['specialEvents'].append(
{
'parkingLocation': parking_location,
'eventVenue': event_venue,
'eventDatetime': event_time,
'eventName': event,
'parkingStartDatetime': parking_start_time,
'parkingEndDatetime': parking_end_time,
'webUrl': self.parking_data['special_events_url']
}
)
except (ValueError, AttributeError, TypeError, IndexError) as e:
# unlike availability, we eat this error. availability is still useful w/out events
logging.error('Error parsing scraped content from city special events page.' + str(e))
special_events['specialEvents'] = []
return special_events
## end parse_cityparking_special_events_html
def fill_cityparking_data_obj(self, parking_availabilities, special_events=None):
for lot in self.parking_data['lots']:
for availability in parking_availabilities:
if availability['name'].lower().find(lot['shortName']) >= 0:
lot['openSpots'] = availability['openSpots']
break
if special_events and (special_events['specialEvents']) > 0:
lot['specialEvents'] = []
for special_event in special_events['specialEvents']:
if special_event['parkingLocation'].lower().find(lot['shortName']) >= 0:
lot['specialEvents'].append(special_event)
## end merge_availability_with_special_events
def remove_locations_from_special_events(self):
for lot in self.parking_data['lots']:
if lot['specialEvents'] and len(lot['specialEvents']) > 0:
for se in lot['specialEvents']:
se.pop('parkingLocation', None)
|
{
"content_hash": "bb9dba6152ad64e9cddc933e622ac647",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 114,
"avg_line_length": 39.370892018779344,
"alnum_prop": 0.5949201049367995,
"repo_name": "gtracy/madison-transit-api",
"id": "9531ccdf2f78c35b9efafe5b9a1254c18d66275c",
"size": "8386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/v2/parking/cityparking.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5995"
},
{
"name": "CSS",
"bytes": "1975"
},
{
"name": "HTML",
"bytes": "67836"
},
{
"name": "JavaScript",
"bytes": "3755"
},
{
"name": "Python",
"bytes": "1778206"
},
{
"name": "Shell",
"bytes": "454"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import unittest
from mock import MagicMock
from mock import call
from mock import patch
from apache_beam.io.gcp.datastore.v1 import fake_datastore
from apache_beam.io.gcp.datastore.v1 import helper
from apache_beam.io.gcp.datastore.v1 import query_splitter
from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import _Mutate
# Protect against environments where datastore library is not available.
# pylint: disable=wrong-import-order, wrong-import-position, ungrouped-imports
try:
from google.cloud.proto.datastore.v1 import datastore_pb2
from google.cloud.proto.datastore.v1 import query_pb2
from google.protobuf import timestamp_pb2
from googledatastore import helper as datastore_helper
except ImportError:
datastore_pb2 = None
# pylint: enable=wrong-import-order, wrong-import-position, ungrouped-imports
@unittest.skipIf(datastore_pb2 is None, 'GCP dependencies are not installed')
class DatastoreioTest(unittest.TestCase):
  """Unit tests for the ReadFromDatastore / WriteToDatastore transforms,
  run against a fully mocked Datastore client."""
  _PROJECT = 'project'
  _KIND = 'kind'
  _NAMESPACE = 'namespace'

  def setUp(self):
    # A fresh mock client and a minimal single-kind query per test.
    self._mock_datastore = MagicMock()
    self._query = query_pb2.Query()
    self._query.kind.add().name = self._KIND

  def test_get_estimated_size_bytes_without_namespace(self):
    entity_bytes = 100
    timestamp = timestamp_pb2.Timestamp(seconds=1234)
    self.check_estimated_size_bytes(entity_bytes, timestamp)

  def test_get_estimated_size_bytes_with_namespace(self):
    entity_bytes = 100
    timestamp = timestamp_pb2.Timestamp(seconds=1234)
    self.check_estimated_size_bytes(entity_bytes, timestamp, self._NAMESPACE)

  def test_SplitQueryFn_with_num_splits(self):
    """An explicit num_splits should be honored without any RPCs."""
    with patch.object(helper, 'get_datastore',
                      return_value=self._mock_datastore):
      num_splits = 23

      def fake_get_splits(datastore, query, num_splits, partition=None):
        return self.split_query(query, num_splits)

      with patch.object(query_splitter, 'get_splits',
                        side_effect=fake_get_splits):
        split_query_fn = ReadFromDatastore.SplitQueryFn(
            self._PROJECT, self._query, None, num_splits)
        split_query_fn.start_bundle()
        returned_split_queries = []
        for split_query in split_query_fn.process(self._query):
          returned_split_queries.append(split_query)

        self.assertEqual(len(returned_split_queries), num_splits)
        # No size-estimation RPC should have been issued.
        self.assertEqual(0, len(self._mock_datastore.run_query.call_args_list))
        self.verify_unique_keys(returned_split_queries)

  def test_SplitQueryFn_without_num_splits(self):
    """num_splits == 0 forces SplitQueryFn to estimate a split count."""
    with patch.object(helper, 'get_datastore',
                      return_value=self._mock_datastore):
      # Force SplitQueryFn to compute the number of query splits
      num_splits = 0
      expected_num_splits = 23
      entity_bytes = (expected_num_splits *
                      ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)
      with patch.object(ReadFromDatastore, 'get_estimated_size_bytes',
                        return_value=entity_bytes):

        def fake_get_splits(datastore, query, num_splits, partition=None):
          return self.split_query(query, num_splits)

        with patch.object(query_splitter, 'get_splits',
                          side_effect=fake_get_splits):
          split_query_fn = ReadFromDatastore.SplitQueryFn(
              self._PROJECT, self._query, None, num_splits)
          split_query_fn.start_bundle()
          returned_split_queries = []
          for split_query in split_query_fn.process(self._query):
            returned_split_queries.append(split_query)

          self.assertEqual(len(returned_split_queries), expected_num_splits)
          self.assertEqual(0,
                           len(self._mock_datastore.run_query.call_args_list))
          self.verify_unique_keys(returned_split_queries)

  def test_SplitQueryFn_with_query_limit(self):
    """A test that verifies no split is performed when the query has a limit."""
    with patch.object(helper, 'get_datastore',
                      return_value=self._mock_datastore):
      self._query.limit.value = 3
      split_query_fn = ReadFromDatastore.SplitQueryFn(
          self._PROJECT, self._query, None, 4)
      split_query_fn.start_bundle()
      returned_split_queries = []
      for split_query in split_query_fn.process(self._query):
        returned_split_queries.append(split_query)

      # The single original query should come back untouched, no RPCs made.
      self.assertEqual(1, len(returned_split_queries))
      self.assertEqual(0, len(self._mock_datastore.method_calls))

  def test_SplitQueryFn_with_exception(self):
    """A test that verifies that no split is performed when failures occur."""
    with patch.object(helper, 'get_datastore',
                      return_value=self._mock_datastore):
      # Force SplitQueryFn to compute the number of query splits
      num_splits = 0
      expected_num_splits = 1
      entity_bytes = (expected_num_splits *
                      ReadFromDatastore._DEFAULT_BUNDLE_SIZE_BYTES)
      with patch.object(ReadFromDatastore, 'get_estimated_size_bytes',
                        return_value=entity_bytes):
        with patch.object(query_splitter, 'get_splits',
                          side_effect=ValueError("Testing query split error")):
          split_query_fn = ReadFromDatastore.SplitQueryFn(
              self._PROJECT, self._query, None, num_splits)
          split_query_fn.start_bundle()
          returned_split_queries = []
          for split_query in split_query_fn.process(self._query):
            returned_split_queries.append(split_query)

          # On splitter failure the original query is used as the only split.
          self.assertEqual(len(returned_split_queries), expected_num_splits)
          self.assertEqual(returned_split_queries[0][1], self._query)
          self.assertEqual(0,
                           len(self._mock_datastore.run_query.call_args_list))
          self.verify_unique_keys(returned_split_queries)

  # NOTE(review): method name keeps the historical 'emtpy' typo so any
  # external test filters keep matching.
  def test_DatastoreWriteFn_with_emtpy_batch(self):
    self.check_DatastoreWriteFn(0)

  def test_DatastoreWriteFn_with_one_batch(self):
    num_entities_to_write = _Mutate._WRITE_BATCH_INITIAL_SIZE * 1 - 50
    self.check_DatastoreWriteFn(num_entities_to_write)

  def test_DatastoreWriteFn_with_multiple_batches(self):
    num_entities_to_write = _Mutate._WRITE_BATCH_INITIAL_SIZE * 3 + 50
    self.check_DatastoreWriteFn(num_entities_to_write)

  def test_DatastoreWriteFn_with_batch_size_exact_multiple(self):
    num_entities_to_write = _Mutate._WRITE_BATCH_INITIAL_SIZE * 2
    self.check_DatastoreWriteFn(num_entities_to_write)

  def check_DatastoreWriteFn(self, num_entities):
    """A helper function to test DatastoreWriteFn."""
    with patch.object(helper, 'get_datastore',
                      return_value=self._mock_datastore):
      entities = [e.entity for e in
                  fake_datastore.create_entities(num_entities)]

      # FIX: materialize the map() result. On Python 3 map() returns an
      # iterator; the for-loop below would exhaust it and the final
      # assertEqual against a list would then always fail.
      expected_mutations = list(
          map(WriteToDatastore.to_upsert_mutation, entities))
      actual_mutations = []

      self._mock_datastore.commit.side_effect = (
          fake_datastore.create_commit(actual_mutations))

      datastore_write_fn = _Mutate.DatastoreWriteFn(
          self._PROJECT, fixed_batch_size=_Mutate._WRITE_BATCH_INITIAL_SIZE)

      datastore_write_fn.start_bundle()
      for mutation in expected_mutations:
        datastore_write_fn.process(mutation)
      datastore_write_fn.finish_bundle()

      self.assertEqual(actual_mutations, expected_mutations)
      # FIX: use floor division. Plain '/' is true division on Python 3
      # and would produce a float, breaking the equality check; '//' is
      # identical for ints on Python 2.
      self.assertEqual(
          (num_entities - 1) // _Mutate._WRITE_BATCH_INITIAL_SIZE + 1,
          self._mock_datastore.commit.call_count)

  def test_DatastoreWriteLargeEntities(self):
    """100*100kB entities gets split over two Commit RPCs."""
    with patch.object(helper, 'get_datastore',
                      return_value=self._mock_datastore):
      entities = [e.entity for e in fake_datastore.create_entities(100)]

      datastore_write_fn = _Mutate.DatastoreWriteFn(
          self._PROJECT, fixed_batch_size=_Mutate._WRITE_BATCH_INITIAL_SIZE)
      datastore_write_fn.start_bundle()
      for entity in entities:
        datastore_helper.add_properties(
            entity, {'large': u'A' * 100000}, exclude_from_indexes=True)
        datastore_write_fn.process(WriteToDatastore.to_upsert_mutation(entity))
      datastore_write_fn.finish_bundle()

      self.assertEqual(2, self._mock_datastore.commit.call_count)

  def verify_unique_keys(self, queries):
    """A helper function that verifies if all the queries have unique keys."""
    keys, _ = zip(*queries)
    keys = set(keys)
    self.assertEqual(len(keys), len(queries))

  def check_estimated_size_bytes(self, entity_bytes, timestamp, namespace=None):
    """A helper method to test get_estimated_size_bytes"""
    timestamp_req = helper.make_request(
        self._PROJECT, namespace, helper.make_latest_timestamp_query(namespace))
    timestamp_resp = self.make_stats_response(
        {'timestamp': datastore_helper.from_timestamp(timestamp)})
    kind_stat_req = helper.make_request(
        self._PROJECT, namespace, helper.make_kind_stats_query(
            namespace, self._query.kind[0].name,
            datastore_helper.micros_from_timestamp(timestamp)))
    kind_stat_resp = self.make_stats_response(
        {'entity_bytes': entity_bytes})

    def fake_run_query(req):
      # Route each expected stats request to its canned response.
      if req == timestamp_req:
        return timestamp_resp
      elif req == kind_stat_req:
        return kind_stat_resp
      else:
        print(kind_stat_req)
        raise ValueError("Unknown req: %s" % req)

    self._mock_datastore.run_query.side_effect = fake_run_query
    self.assertEqual(entity_bytes, ReadFromDatastore.get_estimated_size_bytes(
        self._PROJECT, namespace, self._query, self._mock_datastore))
    # Exactly two stats RPCs, in order: latest timestamp, then kind stats.
    self.assertEqual(self._mock_datastore.run_query.call_args_list,
                     [call(timestamp_req), call(kind_stat_req)])

  def make_stats_response(self, property_map):
    """Build a RunQueryResponse with one entity carrying property_map."""
    resp = datastore_pb2.RunQueryResponse()
    entity_result = resp.batch.entity_results.add()
    datastore_helper.add_properties(entity_result.entity, property_map)
    return resp

  def split_query(self, query, num_splits):
    """Generate dummy query splits."""
    split_queries = []
    for _ in range(0, num_splits):
      q = query_pb2.Query()
      q.CopyFrom(query)
      split_queries.append(q)
    return split_queries
@unittest.skipIf(datastore_pb2 is None, 'GCP dependencies are not installed')
class DynamicWriteBatcherTest(unittest.TestCase):
  """Tests for _Mutate._DynamicBatchSizer's latency-adaptive batch sizing.

  FIX: all assertions use assertEqual; assertEquals is a deprecated alias
  that emits DeprecationWarning on modern unittest.
  """

  def setUp(self):
    self._batcher = _Mutate._DynamicBatchSizer()

  # If possible, keep these test cases aligned with the Java test cases in
  # DatastoreV1Test.java
  def test_no_data(self):
    # With no latency reports, the initial batch size is used.
    self.assertEqual(_Mutate._WRITE_BATCH_INITIAL_SIZE,
                     self._batcher.get_batch_size(0))

  def test_fast_queries(self):
    self._batcher.report_latency(0, 1000, 200)
    self._batcher.report_latency(0, 1000, 200)
    # Fast commits push the batch size up to the maximum.
    self.assertEqual(_Mutate._WRITE_BATCH_MAX_SIZE,
                     self._batcher.get_batch_size(0))

  def test_slow_queries(self):
    self._batcher.report_latency(0, 10000, 200)
    self._batcher.report_latency(0, 10000, 200)
    self.assertEqual(100, self._batcher.get_batch_size(0))

  def test_size_not_below_minimum(self):
    self._batcher.report_latency(0, 30000, 50)
    self._batcher.report_latency(0, 30000, 50)
    # Even very slow commits never shrink the batch below the minimum.
    self.assertEqual(_Mutate._WRITE_BATCH_MIN_SIZE,
                     self._batcher.get_batch_size(0))

  def test_sliding_window(self):
    # Old (slow) samples age out of the sizing window over time.
    self._batcher.report_latency(0, 30000, 50)
    self._batcher.report_latency(50000, 5000, 200)
    self._batcher.report_latency(100000, 5000, 200)
    self.assertEqual(200, self._batcher.get_batch_size(150000))
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "600d70fbf96d85be2214e9ab766c7ec5",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 80,
"avg_line_length": 40.97231833910035,
"alnum_prop": 0.6766320412127355,
"repo_name": "tgroh/beam",
"id": "e131f93d52076f1cd4d4a995bf083bf7ed13e6f0",
"size": "12626",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/gcp/datastore/v1/datastoreio_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "FreeMarker",
"bytes": "5994"
},
{
"name": "Go",
"bytes": "2167258"
},
{
"name": "Groovy",
"bytes": "127719"
},
{
"name": "Java",
"bytes": "17206671"
},
{
"name": "Python",
"bytes": "3584300"
},
{
"name": "Shell",
"bytes": "82600"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
# Package metadata is collected into a dict first, then expanded into the
# setup() call; behavior is identical to calling setup() with literal kwargs.
_PACKAGE_INFO = dict(
    name='BinPy',
    version='0.3.1',
    author='BinPy Developers',
    author_email='binpylib@gmail.com',
    url='http://pypi.python.org/pypi/BinPy/',
    # license=open('docs/LICENSE.txt').read(),
    description='Virtualizing Electronics',
    # long_description=open('README.md').read(),
    install_requires="networkx >= 1.8.1",
    packages=find_packages(),
    package_data={'data': ['README.md']},
    include_package_data=True,
    # console entry point exposing the interactive BinPy shell
    entry_points={'console_scripts': ['binpy = BinPy.shell:shell_main']},
)

setup(**_PACKAGE_INFO)
|
{
"content_hash": "914ada64b51a0420098a079d386f30c1",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 72,
"avg_line_length": 33.35294117647059,
"alnum_prop": 0.656084656084656,
"repo_name": "MridulS/BinPy",
"id": "80104da12482161ed1d71ce970f21012e0ca6ed9",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.